diff --git a/.maintain/rename-crates-for-2.0.sh b/.maintain/rename-crates-for-2.0.sh index d2bd871f09780..b024cfd4418c6 100644 --- a/.maintain/rename-crates-for-2.0.sh +++ b/.maintain/rename-crates-for-2.0.sh @@ -43,6 +43,7 @@ TO_RENAME=( "sp-finality-granpda sp-finality-grandpa" "sp-sesssion sp-session" "sp-tracing-pool sp-transaction-pool" + "sc-basic-authority sc-basic-authorship" # PRIMITIVES "substrate-application-crypto sp-application-crypto" @@ -88,7 +89,7 @@ TO_RENAME=( "substrate-client sc-client" "substrate-client-api sc-api" "substrate-authority-discovery sc-authority-discovery" - "substrate-basic-authorship sc-basic-authority" + "substrate-basic-authorship sc-basic-authorship" "substrate-block-builder sc-block-builder" "substrate-chain-spec sc-chain-spec" "substrate-chain-spec-derive sc-chain-spec-derive" diff --git a/Cargo.lock b/Cargo.lock index 60a7a4e1ad20f..b8df759cf4087 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -371,7 +371,7 @@ dependencies = [ [[package]] name = "browser-utils" -version = "2.0.0" +version = "0.8.0" dependencies = [ "clear_on_drop 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "console_error_panic_hook 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -386,7 +386,7 @@ dependencies = [ "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "sc-chain-spec 2.0.0", "sc-network 0.8.0", - "sc-service 2.0.0", + "sc-service 0.8.0", "wasm-bindgen 0.2.57 (registry+https://github.com/rust-lang/crates.io-index)", "wasm-bindgen-futures 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1087,7 +1087,7 @@ dependencies = [ [[package]] name = "environmental" -version = "1.0.2" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -1231,13 +1231,13 @@ dependencies = [ [[package]] name = "finality-grandpa" -version = "0.10.2" +version = "0.10.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "num-traits 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1249,7 +1249,7 @@ dependencies = [ "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-hex 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-hex 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "static_assertions 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1292,7 +1292,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" name = "fork-tree" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1305,7 +1305,7 @@ dependencies = [ "pallet-balances 2.0.0", "pallet-indices 2.0.0", "pallet-transaction-payment 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-io 2.0.0", @@ -1315,9 +1315,9 @@ dependencies = [ [[package]] name = "frame-metadata" -version = "2.0.0" 
+version = "10.0.0" dependencies = [ - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-std 2.0.0", @@ -1328,13 +1328,13 @@ name = "frame-support" version = "2.0.0" dependencies = [ "bitmask 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", - "frame-metadata 2.0.0", + "frame-metadata 10.0.0", "frame-support-procedural 2.0.0", "frame-system 2.0.0", "impl-trait-for-tuples 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "once_cell 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "paste 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "pretty_assertions 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1343,7 +1343,7 @@ dependencies = [ "sp-inherents 2.0.0", "sp-io 2.0.0", "sp-runtime 2.0.0", - "sp-state-machine 2.0.0", + "sp-state-machine 0.8.0", "sp-std 2.0.0", "tracing 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1383,14 +1383,14 @@ name = "frame-support-test" version = "2.0.0" dependencies = [ "frame-support 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "pretty_assertions 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 
(registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-inherents 2.0.0", "sp-io 2.0.0", "sp-runtime 2.0.0", - "sp-state-machine 2.0.0", + "sp-state-machine 0.8.0", "trybuild 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1401,21 +1401,22 @@ dependencies = [ "criterion 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "frame-support 2.0.0", "impl-trait-for-tuples 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "safe-mix 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", + "sp-externalities 0.8.0", "sp-io 2.0.0", "sp-runtime 2.0.0", "sp-std 2.0.0", "sp-version 2.0.0", + "substrate-test-runtime-client 2.0.0", ] [[package]] name = "frame-system-rpc-runtime-api" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "sp-api 2.0.0", ] @@ -2013,7 +2014,7 @@ name = "impl-codec" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2827,7 +2828,7 @@ dependencies = [ [[package]] name = "memory-db" -version = "0.18.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "ahash 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2992,11 +2993,10 @@ dependencies = [ name = 
"node-cli" version = "2.0.0" dependencies = [ - "browser-utils 2.0.0", + "browser-utils 0.8.0", "ctrlc 3.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "frame-support 2.0.0", "frame-system 2.0.0", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "hex-literal 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 14.0.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3005,7 +3005,7 @@ dependencies = [ "node-primitives 2.0.0", "node-rpc 2.0.0", "node-runtime 2.0.0", - "node-transaction-factory 2.0.0", + "node-transaction-factory 0.8.0", "pallet-authority-discovery 2.0.0", "pallet-balances 2.0.0", "pallet-contracts 2.0.0", @@ -3013,22 +3013,22 @@ dependencies = [ "pallet-indices 2.0.0", "pallet-timestamp 2.0.0", "pallet-transaction-payment 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "sc-authority-discovery 2.0.0", - "sc-basic-authority 2.0.0", + "sc-authority-discovery 0.8.0", + "sc-basic-authorship 0.8.0", "sc-chain-spec 2.0.0", - "sc-cli 2.0.0", - "sc-client 2.0.0", + "sc-cli 0.8.0", + "sc-client 0.8.0", "sc-client-api 2.0.0", - "sc-client-db 2.0.0", + "sc-client-db 0.8.0", "sc-consensus-babe 0.8.0", - "sc-finality-grandpa 2.0.0", + "sc-finality-grandpa 0.8.0", "sc-keystore 2.0.0", "sc-network 0.8.0", "sc-offchain 2.0.0", "sc-rpc 2.0.0", - "sc-service 2.0.0", + "sc-service 0.8.0", "sc-service-test 2.0.0", "sc-telemetry 2.0.0", "sc-transaction-pool 2.0.0", @@ -3048,7 +3048,7 @@ dependencies = [ "structopt 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "substrate-build-script-utils 2.0.0", "tempfile 3.1.0 
(registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", "vergen 3.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "wasm-bindgen 0.2.57 (registry+https://github.com/rust-lang/crates.io-index)", "wasm-bindgen-futures 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3073,13 +3073,13 @@ dependencies = [ "pallet-timestamp 2.0.0", "pallet-transaction-payment 2.0.0", "pallet-treasury 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sc-executor 2.0.0", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "sc-executor 0.8.0", "sp-application-crypto 2.0.0", "sp-core 2.0.0", "sp-io 2.0.0", "sp-runtime 2.0.0", - "sp-state-machine 2.0.0", + "sp-state-machine 0.8.0", "sp-trie 2.0.0", "substrate-test-client 2.0.0", "trie-root 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3103,9 +3103,9 @@ dependencies = [ "jsonrpc-core 14.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "node-primitives 2.0.0", "node-runtime 2.0.0", - "pallet-contracts-rpc 2.0.0", + "pallet-contracts-rpc 0.8.0", "pallet-transaction-payment-rpc 2.0.0", - "sc-client 2.0.0", + "sc-client 0.8.0", "sp-api 2.0.0", "sp-runtime 2.0.0", "sp-transaction-pool 2.0.0", @@ -3141,18 +3141,20 @@ dependencies = [ "pallet-balances 2.0.0", "pallet-collective 2.0.0", "pallet-contracts 2.0.0", - "pallet-contracts-rpc-runtime-api 2.0.0", + "pallet-contracts-rpc-runtime-api 0.8.0", "pallet-democracy 2.0.0", "pallet-elections-phragmen 2.0.0", "pallet-finality-tracker 2.0.0", "pallet-grandpa 2.0.0", + "pallet-identity 2.0.0", "pallet-im-online 2.0.0", "pallet-indices 2.0.0", "pallet-membership 2.0.0", - "pallet-nicks 2.0.0", "pallet-offences 2.0.0", 
"pallet-randomness-collective-flip 2.0.0", + "pallet-recovery 2.0.0", "pallet-session 2.0.0", + "pallet-society 2.0.0", "pallet-staking 2.0.0", "pallet-staking-reward-curve 2.0.0", "pallet-sudo 2.0.0", @@ -3161,9 +3163,8 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api 2.0.0", "pallet-treasury 2.0.0", "pallet-utility 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-hex 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "safe-mix 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-hex 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-api 2.0.0", "sp-authority-discovery 2.0.0", @@ -3188,20 +3189,19 @@ name = "node-template" version = "2.0.0" dependencies = [ "ctrlc 3.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "node-template-runtime 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sc-basic-authority 2.0.0", - "sc-cli 2.0.0", - "sc-client 2.0.0", + "sc-basic-authorship 0.8.0", + "sc-cli 0.8.0", + "sc-client 0.8.0", "sc-consensus-aura 0.8.0", - "sc-executor 2.0.0", - "sc-finality-grandpa 2.0.0", + "sc-executor 0.8.0", + "sc-finality-grandpa 0.8.0", "sc-network 0.8.0", - "sc-service 2.0.0", + "sc-service 0.8.0", "sc-transaction-pool 2.0.0", "sp-consensus 0.8.0", 
"sp-consensus-aura 0.8.0", @@ -3212,7 +3212,7 @@ dependencies = [ "sp-runtime 2.0.0", "sp-transaction-pool 2.0.0", "substrate-build-script-utils 2.0.0", - "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", "trie-root 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", "vergen 3.0.4 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3232,8 +3232,7 @@ dependencies = [ "pallet-sudo 2.0.0", "pallet-timestamp 2.0.0", "pallet-transaction-payment 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "safe-mix 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-api 2.0.0", "sp-block-builder 2.0.0", @@ -3264,13 +3263,14 @@ dependencies = [ "pallet-grandpa 2.0.0", "pallet-indices 2.0.0", "pallet-session 2.0.0", + "pallet-society 2.0.0", "pallet-staking 2.0.0", "pallet-timestamp 2.0.0", "pallet-transaction-payment 2.0.0", "pallet-treasury 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sc-client 2.0.0", - "sc-executor 2.0.0", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "sc-client 0.8.0", + "sc-executor 0.8.0", "sp-core 2.0.0", "sp-io 2.0.0", "sp-keyring 2.0.0", @@ -3281,14 +3281,14 @@ dependencies = [ [[package]] name = "node-transaction-factory" -version = "2.0.0" +version = "0.8.0" dependencies = [ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sc-cli 2.0.0", - "sc-client 2.0.0", + "parity-scale-codec 1.1.2 
(registry+https://github.com/rust-lang/crates.io-index)", + "sc-cli 0.8.0", + "sc-client 0.8.0", "sc-client-api 2.0.0", - "sc-service 2.0.0", + "sc-service 0.8.0", "sp-api 2.0.0", "sp-block-builder 2.0.0", "sp-blockchain 2.0.0", @@ -3452,7 +3452,7 @@ version = "2.0.0" dependencies = [ "frame-support 2.0.0", "frame-system 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-io 2.0.0", @@ -3469,7 +3469,7 @@ dependencies = [ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "pallet-session 2.0.0", "pallet-timestamp 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-application-crypto 2.0.0", @@ -3489,7 +3489,7 @@ dependencies = [ "frame-support 2.0.0", "frame-system 2.0.0", "pallet-session 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-application-crypto 2.0.0", "sp-authority-discovery 2.0.0", @@ -3507,7 +3507,7 @@ dependencies = [ "frame-support 2.0.0", "frame-system 2.0.0", "impl-trait-for-tuples 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", 
"sp-authorship 2.0.0", "sp-core 2.0.0", "sp-inherents 2.0.0", @@ -3526,7 +3526,7 @@ dependencies = [ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "pallet-session 2.0.0", "pallet-timestamp 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-consensus-babe 0.8.0", @@ -3548,8 +3548,7 @@ dependencies = [ "frame-support 2.0.0", "frame-system 2.0.0", "pallet-transaction-payment 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "safe-mix 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-io 2.0.0", @@ -3565,8 +3564,7 @@ dependencies = [ "frame-system 2.0.0", "hex-literal 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "pallet-balances 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "safe-mix 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-io 2.0.0", @@ -3585,14 +3583,14 @@ dependencies = [ "pallet-balances 2.0.0", "pallet-randomness-collective-flip 2.0.0", "pallet-timestamp 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parity-wasm 
0.41.0 (registry+https://github.com/rust-lang/crates.io-index)", "pwasm-utils 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-io 2.0.0", "sp-runtime 2.0.0", - "sp-sandbox 2.0.0", + "sp-sandbox 0.8.0", "sp-std 2.0.0", "wabt 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)", "wasmi-validation 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3600,13 +3598,13 @@ dependencies = [ [[package]] name = "pallet-contracts-rpc" -version = "2.0.0" +version = "0.8.0" dependencies = [ "jsonrpc-core 14.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core-client 14.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-derive 14.0.5 (registry+https://github.com/rust-lang/crates.io-index)", - "pallet-contracts-rpc-runtime-api 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "pallet-contracts-rpc-runtime-api 0.8.0", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", "sp-api 2.0.0", @@ -3618,9 +3616,9 @@ dependencies = [ [[package]] name = "pallet-contracts-rpc-runtime-api" -version = "2.0.0" +version = "0.8.0" dependencies = [ - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "sp-api 2.0.0", "sp-runtime 2.0.0", "sp-std 2.0.0", @@ -3633,8 +3631,7 @@ dependencies = [ "frame-support 2.0.0", "frame-system 2.0.0", "pallet-balances 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "safe-mix 1.0.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-io 2.0.0", @@ -3650,8 +3647,7 @@ dependencies = [ "frame-system 2.0.0", "hex-literal 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "pallet-balances 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "safe-mix 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-io 2.0.0", @@ -3667,7 +3663,7 @@ dependencies = [ "frame-system 2.0.0", "hex-literal 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "pallet-balances 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-io 2.0.0", @@ -3686,7 +3682,7 @@ dependencies = [ "frame-system 2.0.0", "pallet-balances 2.0.0", "pallet-timestamp 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "primitive-types 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3704,7 +3700,7 @@ dependencies = [ "frame-support 2.0.0", "frame-system 2.0.0", "pallet-balances 2.0.0", - "parity-scale-codec 1.1.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-io 2.0.0", @@ -3719,7 +3715,7 @@ dependencies = [ "frame-support 2.0.0", "frame-system 2.0.0", "impl-trait-for-tuples 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-finality-tracker 2.0.0", @@ -3735,7 +3731,7 @@ version = "2.0.0" dependencies = [ "frame-support 2.0.0", "frame-system 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-io 2.0.0", @@ -3751,7 +3747,7 @@ dependencies = [ "frame-system 2.0.0", "pallet-finality-tracker 2.0.0", "pallet-session 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-finality-grandpa 2.0.0", @@ -3769,7 +3765,7 @@ dependencies = [ "frame-support 2.0.0", "frame-system 2.0.0", "pallet-balances 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-io 2.0.0", @@ 
-3785,7 +3781,7 @@ dependencies = [ "frame-system 2.0.0", "pallet-authorship 2.0.0", "pallet-session 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-application-crypto 2.0.0", "sp-core 2.0.0", @@ -3801,8 +3797,7 @@ version = "2.0.0" dependencies = [ "frame-support 2.0.0", "frame-system 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "safe-mix 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-io 2.0.0", @@ -3817,7 +3812,7 @@ version = "2.0.0" dependencies = [ "frame-support 2.0.0", "frame-system 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-io 2.0.0", @@ -3832,7 +3827,7 @@ dependencies = [ "frame-support 2.0.0", "frame-system 2.0.0", "pallet-balances 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-io 2.0.0", @@ -3847,7 +3842,7 @@ dependencies = [ "frame-support 2.0.0", "frame-system 2.0.0", "pallet-balances 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 
(registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-io 2.0.0", @@ -3862,7 +3857,7 @@ version = "2.0.0" dependencies = [ "frame-support 2.0.0", "frame-system 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "safe-mix 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-io 2.0.0", @@ -3878,7 +3873,7 @@ dependencies = [ "frame-support 2.0.0", "frame-system 2.0.0", "pallet-balances 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-io 2.0.0", @@ -3893,7 +3888,7 @@ dependencies = [ "frame-support 2.0.0", "frame-system 2.0.0", "pallet-balances 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-io 2.0.0", @@ -3910,8 +3905,7 @@ dependencies = [ "impl-trait-for-tuples 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "pallet-timestamp 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "safe-mix 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", 
"sp-application-crypto 2.0.0", "sp-core 2.0.0", @@ -3929,7 +3923,7 @@ dependencies = [ "frame-support 2.0.0", "frame-system 2.0.0", "pallet-balances 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", @@ -3949,8 +3943,7 @@ dependencies = [ "pallet-session 2.0.0", "pallet-staking-reward-curve 2.0.0", "pallet-timestamp 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "safe-mix 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-io 2.0.0", @@ -3979,7 +3972,7 @@ version = "2.0.0" dependencies = [ "frame-support 2.0.0", "frame-system 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-io 2.0.0", @@ -3994,7 +3987,7 @@ dependencies = [ "frame-support 2.0.0", "frame-system 2.0.0", "impl-trait-for-tuples 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-inherents 2.0.0", @@ -4012,7 +4005,7 @@ dependencies = [ "frame-system 2.0.0", "pallet-balances 2.0.0", 
"pallet-transaction-payment-rpc-runtime-api 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-io 2.0.0", "sp-runtime 2.0.0", @@ -4027,7 +4020,7 @@ dependencies = [ "jsonrpc-core-client 14.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-derive 14.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "pallet-transaction-payment-rpc-runtime-api 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-api 2.0.0", "sp-blockchain 2.0.0", @@ -4041,7 +4034,7 @@ name = "pallet-transaction-payment-rpc-runtime-api" version = "2.0.0" dependencies = [ "frame-support 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", "sp-api 2.0.0", @@ -4056,7 +4049,7 @@ dependencies = [ "frame-support 2.0.0", "frame-system 2.0.0", "pallet-balances 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-io 2.0.0", @@ -4071,7 +4064,7 @@ dependencies = [ "frame-support 2.0.0", "frame-system 2.0.0", "pallet-balances 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 
1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-io 2.0.0", @@ -4148,7 +4141,7 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4500,8 +4493,8 @@ dependencies = [ ] [[package]] -name = "prometheus-endpoint" -version = "2.0.0" +name = "prometheus-exporter" +version = "0.8.0" dependencies = [ "async-std 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "derive_more 0.99.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4917,7 +4910,7 @@ name = "rlp" version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "rustc-hex 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-hex 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -4955,7 +4948,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "rustc-hex" -version = "2.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -5033,7 +5026,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" -version = "2.0.0" +version = "0.8.0" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "derive_more 0.99.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5042,7 +5035,7 @@ dependencies = [ "futures-timer 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "libp2p 0.14.0-alpha.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "prost 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "prost-build 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "quickcheck 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5061,15 +5054,15 @@ dependencies = [ ] [[package]] -name = "sc-basic-authority" -version = "2.0.0" +name = "sc-basic-authorship" +version = "0.8.0" dependencies = [ "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sc-block-builder 2.0.0", - "sc-client 2.0.0", + "sc-block-builder 0.8.0", + "sc-client 0.8.0", "sc-client-api 2.0.0", "sc-telemetry 2.0.0", "sc-transaction-pool 2.0.0", @@ -5086,9 +5079,9 @@ dependencies = [ [[package]] name = "sc-block-builder" -version = "2.0.0" +version = "0.8.0" dependencies = [ - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "sc-client-api 2.0.0", "sp-api 2.0.0", "sp-block-builder 2.0.0", @@ -5096,7 +5089,7 @@ dependencies = [ "sp-consensus 0.8.0", "sp-core 2.0.0", "sp-runtime 2.0.0", - "sp-state-machine 2.0.0", + "sp-state-machine 0.8.0", ] [[package]] @@ -5125,7 +5118,7 @@ dependencies = [ [[package]] name = "sc-cli" -version = "2.0.0" +version = "0.8.0" dependencies = [ "ansi_term 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)", "app_dirs 1.2.1 
(registry+https://github.com/rust-lang/crates.io-index)", @@ -5138,12 +5131,12 @@ dependencies = [ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "names 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "prometheus-endpoint 2.0.0", + "prometheus-exporter 0.8.0", "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "rpassword 4.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "sc-client-api 2.0.0", "sc-network 0.8.0", - "sc-service 2.0.0", + "sc-service 0.8.0", "sc-telemetry 2.0.0", "sc-tracing 2.0.0", "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5152,7 +5145,7 @@ dependencies = [ "sp-keyring 2.0.0", "sp-panic-handler 2.0.0", "sp-runtime 2.0.0", - "sp-state-machine 2.0.0", + "sp-state-machine 0.8.0", "structopt 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5161,7 +5154,7 @@ dependencies = [ [[package]] name = "sc-client" -version = "2.0.0" +version = "0.8.0" dependencies = [ "derive_more 0.99.2 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5172,22 +5165,22 @@ dependencies = [ "kvdb 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "kvdb-memorydb 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 
(registry+https://github.com/rust-lang/crates.io-index)", - "sc-block-builder 2.0.0", + "sc-block-builder 0.8.0", "sc-client-api 2.0.0", - "sc-executor 2.0.0", + "sc-executor 0.8.0", "sc-telemetry 2.0.0", "sp-api 2.0.0", "sp-blockchain 2.0.0", "sp-consensus 0.8.0", "sp-core 2.0.0", - "sp-externalities 2.0.0", + "sp-externalities 0.8.0", "sp-inherents 2.0.0", "sp-keyring 2.0.0", "sp-panic-handler 2.0.0", "sp-runtime 2.0.0", - "sp-state-machine 2.0.0", + "sp-state-machine 0.8.0", "sp-std 2.0.0", "sp-trie 2.0.0", "sp-version 2.0.0", @@ -5207,19 +5200,19 @@ dependencies = [ "hex-literal 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "kvdb 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sc-executor 2.0.0", + "sc-executor 0.8.0", "sc-telemetry 2.0.0", "sp-api 2.0.0", "sp-blockchain 2.0.0", "sp-consensus 0.8.0", "sp-core 2.0.0", - "sp-externalities 2.0.0", + "sp-externalities 0.8.0", "sp-inherents 2.0.0", "sp-keyring 2.0.0", "sp-runtime 2.0.0", - "sp-state-machine 2.0.0", + "sp-state-machine 0.8.0", "sp-std 2.0.0", "sp-test-primitives 2.0.0", "sp-transaction-pool 2.0.0", @@ -5229,7 +5222,7 @@ dependencies = [ [[package]] name = "sc-client-db" -version = "2.0.0" +version = "0.8.0" dependencies = [ "env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", "hash-db 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5238,22 +5231,23 @@ dependencies = [ "kvdb-rocksdb 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "linked-hash-map 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 
0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parity-util-mem 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "quickcheck 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sc-client 2.0.0", + "sc-client 0.8.0", "sc-client-api 2.0.0", - "sc-executor 2.0.0", - "sc-state-db 2.0.0", + "sc-executor 0.8.0", + "sc-state-db 0.8.0", "sp-blockchain 2.0.0", "sp-consensus 0.8.0", "sp-core 2.0.0", "sp-keyring 2.0.0", "sp-runtime 2.0.0", - "sp-state-machine 2.0.0", + "sp-state-machine 0.8.0", "sp-trie 2.0.0", "substrate-test-runtime-client 2.0.0", + "tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -5266,16 +5260,16 @@ dependencies = [ "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "futures-timer 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sc-client 2.0.0", + "sc-client 0.8.0", "sc-client-api 2.0.0", "sc-consensus-slots 0.8.0", - "sc-executor 2.0.0", + "sc-executor 0.8.0", "sc-keystore 2.0.0", "sc-network 0.8.0", - "sc-network-test 2.0.0", - "sc-service 2.0.0", + "sc-network-test 0.8.0", + "sc-service 0.8.0", "sc-telemetry 2.0.0", "sp-api 2.0.0", "sp-application-crypto 2.0.0", @@ -5310,20 +5304,20 @@ dependencies = [ "num-bigint 0.2.3 
(registry+https://github.com/rust-lang/crates.io-index)", "num-rational 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "num-traits 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "pdqselect 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "sc-block-builder 2.0.0", - "sc-client 2.0.0", + "sc-block-builder 0.8.0", + "sc-client 0.8.0", "sc-client-api 2.0.0", "sc-consensus-slots 0.8.0", "sc-consensus-uncles 0.8.0", - "sc-executor 2.0.0", + "sc-executor 0.8.0", "sc-keystore 2.0.0", "sc-network 0.8.0", - "sc-network-test 2.0.0", - "sc-service 2.0.0", + "sc-network-test 0.8.0", + "sc-service 0.8.0", "sc-telemetry 2.0.0", "schnorrkel 0.8.5 (registry+https://github.com/rust-lang/crates.io-index)", "sp-api 2.0.0", @@ -5351,7 +5345,7 @@ dependencies = [ "derive_more 0.99.2 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "sc-client-api 2.0.0", "sp-api 2.0.0", "sp-block-builder 2.0.0", @@ -5371,7 +5365,7 @@ dependencies = [ "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "futures-timer 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "sc-client-api 2.0.0", "sc-telemetry 2.0.0", @@ -5381,7 +5375,7 @@ dependencies = [ "sp-core 2.0.0", "sp-inherents 2.0.0", "sp-runtime 2.0.0", - "sp-state-machine 2.0.0", + "sp-state-machine 0.8.0", "substrate-test-runtime-client 2.0.0", ] @@ -5400,7 +5394,7 @@ dependencies = [ [[package]] name = "sc-executor" -version = "2.0.0" +version = "0.8.0" dependencies = [ "assert_matches 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "derive_more 0.99.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5408,20 +5402,20 @@ dependencies = [ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "libsecp256k1 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parity-wasm 0.41.0 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sc-executor-common 2.0.0", - "sc-executor-wasmi 2.0.0", - "sc-executor-wasmtime 2.0.0", + "sc-executor-common 0.8.0", + "sc-executor-wasmi 0.8.0", + "sc-executor-wasmtime 0.8.0", "sc-runtime-test 2.0.0", "sp-core 2.0.0", - "sp-externalities 2.0.0", + "sp-externalities 0.8.0", "sp-io 2.0.0", "sp-panic-handler 2.0.0", "sp-runtime-interface 2.0.0", "sp-serializer 2.0.0", - "sp-state-machine 2.0.0", + "sp-state-machine 0.8.0", "sp-trie 2.0.0", "sp-version 2.0.0", "sp-wasm-interface 2.0.0", @@ -5433,11 +5427,11 @@ dependencies = [ [[package]] name = "sc-executor-common" 
-version = "2.0.0" +version = "0.8.0" dependencies = [ "derive_more 0.99.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-runtime-interface 2.0.0", "sp-serializer 2.0.0", @@ -5447,14 +5441,13 @@ dependencies = [ [[package]] name = "sc-executor-wasmi" -version = "2.0.0" +version = "0.8.0" dependencies = [ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parity-wasm 0.41.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sc-executor-common 2.0.0", + "sc-executor-common 0.8.0", "sp-core 2.0.0", - "sp-externalities 2.0.0", "sp-runtime-interface 2.0.0", "sp-wasm-interface 2.0.0", "wasmi 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5462,7 +5455,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" -version = "2.0.0" +version = "0.8.0" dependencies = [ "assert_matches 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "cranelift-codegen 0.50.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5471,11 +5464,10 @@ dependencies = [ "cranelift-native 0.50.0 (registry+https://github.com/rust-lang/crates.io-index)", "cranelift-wasm 0.50.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", 
"parity-wasm 0.41.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sc-executor-common 2.0.0", + "sc-executor-common 0.8.0", "sp-core 2.0.0", - "sp-externalities 2.0.0", "sp-runtime-interface 2.0.0", "sp-wasm-interface 2.0.0", "wasmi 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5486,27 +5478,28 @@ dependencies = [ [[package]] name = "sc-finality-grandpa" -version = "2.0.0" +version = "0.8.0" dependencies = [ "env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", - "finality-grandpa 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)", + "finality-grandpa 0.10.3 (registry+https://github.com/rust-lang/crates.io-index)", "fork-tree 2.0.0", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "futures-timer 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "sc-client 2.0.0", + "sc-client 0.8.0", "sc-client-api 2.0.0", "sc-keystore 2.0.0", "sc-network 0.8.0", - "sc-network-gossip 2.0.0", - "sc-network-test 2.0.0", + "sc-network-gossip 0.8.0", + "sc-network-test 0.8.0", "sc-telemetry 2.0.0", "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", "sp-api 2.0.0", + "sp-arithmetic 2.0.0", "sp-blockchain 2.0.0", "sp-consensus 0.8.0", "sp-consensus-babe 0.8.0", @@ -5516,7 +5509,7 @@ dependencies = [ "sp-inherents 2.0.0", "sp-keyring 2.0.0", "sp-runtime 2.0.0", - "sp-state-machine 2.0.0", + 
"sp-state-machine 0.8.0", "substrate-test-runtime-client 2.0.0", "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5557,13 +5550,13 @@ dependencies = [ "linked_hash_set 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "lru 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "quickcheck 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-hex 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "sc-block-builder 2.0.0", - "sc-client 2.0.0", + "rustc-hex 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sc-block-builder 0.8.0", + "sc-client 0.8.0", "sc-client-api 2.0.0", "sc-peerset 2.0.0", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5589,7 +5582,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" -version = "2.0.0" +version = "0.8.0" dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5604,7 +5597,7 @@ dependencies = [ [[package]] name = "sc-network-test" -version = "2.0.0" +version = "0.8.0" dependencies = [ "env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5614,8 +5607,8 @@ dependencies = [ "log 0.4.8 
(registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "sc-block-builder 2.0.0", - "sc-client 2.0.0", + "sc-block-builder 0.8.0", + "sc-client 0.8.0", "sc-client-api 2.0.0", "sc-network 0.8.0", "sp-blockchain 2.0.0", @@ -5643,11 +5636,11 @@ dependencies = [ "hyper-rustls 0.17.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.11.1 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "sc-client-api 2.0.0", - "sc-client-db 2.0.0", + "sc-client-db 0.8.0", "sc-keystore 2.0.0", "sc-network 0.8.0", "sc-transaction-pool 2.0.0", @@ -5683,15 +5676,15 @@ dependencies = [ "jsonrpc-core 14.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-pubsub 14.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-hex 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "sc-client 2.0.0", + "rustc-hex 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sc-client 0.8.0", "sc-client-api 2.0.0", - "sc-executor 2.0.0", + "sc-executor 
0.8.0", "sc-keystore 2.0.0", "sc-network 0.8.0", - "sc-rpc-api 2.0.0", + "sc-rpc-api 0.8.0", "sc-transaction-pool 2.0.0", "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", "sp-api 2.0.0", @@ -5701,7 +5694,7 @@ dependencies = [ "sp-rpc 2.0.0", "sp-runtime 2.0.0", "sp-session 2.0.0", - "sp-state-machine 2.0.0", + "sp-state-machine 0.8.0", "sp-transaction-pool 2.0.0", "sp-version 2.0.0", "substrate-test-runtime-client 2.0.0", @@ -5710,7 +5703,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" -version = "2.0.0" +version = "0.8.0" dependencies = [ "derive_more 0.99.2 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5719,7 +5712,7 @@ dependencies = [ "jsonrpc-derive 14.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-pubsub 14.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5750,31 +5743,32 @@ dependencies = [ "sp-core 2.0.0", "sp-io 2.0.0", "sp-runtime 2.0.0", - "sp-sandbox 2.0.0", + "sp-sandbox 0.8.0", "sp-std 2.0.0", "substrate-wasm-builder-runner 1.0.4", ] [[package]] name = "sc-service" -version = "2.0.0" +version = "0.8.0" dependencies = [ "derive_more 0.99.2 (registry+https://github.com/rust-lang/crates.io-index)", "exit-future 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 
(registry+https://github.com/rust-lang/crates.io-index)", "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-timer 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "parity-multiaddr 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "prometheus-endpoint 2.0.0", + "prometheus-exporter 0.8.0", "sc-chain-spec 2.0.0", - "sc-client 2.0.0", + "sc-client 0.8.0", "sc-client-api 2.0.0", - "sc-client-db 2.0.0", - "sc-executor 2.0.0", - "sc-finality-grandpa 2.0.0", + "sc-client-db 0.8.0", + "sc-executor 0.8.0", + "sc-finality-grandpa 0.8.0", "sc-keystore 2.0.0", "sc-network 0.8.0", "sc-offchain 2.0.0", @@ -5800,9 +5794,8 @@ dependencies = [ "substrate-test-runtime-client 2.0.0", "sysinfo 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", "target_info 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", "tracing 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -5815,9 +5808,9 @@ dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 
(registry+https://github.com/rust-lang/crates.io-index)", - "sc-client 2.0.0", + "sc-client 0.8.0", "sc-network 0.8.0", - "sc-service 2.0.0", + "sc-service 0.8.0", "sp-consensus 0.8.0", "sp-core 2.0.0", "sp-runtime 2.0.0", @@ -5828,11 +5821,11 @@ dependencies = [ [[package]] name = "sc-state-db" -version = "2.0.0" +version = "0.8.0" dependencies = [ "env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", ] @@ -5881,7 +5874,7 @@ dependencies = [ "derive_more 0.99.2 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", @@ -5897,7 +5890,7 @@ dependencies = [ "derive_more 0.99.2 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 
(registry+https://github.com/rust-lang/crates.io-index)", "sc-client-api 2.0.0", "sc-transaction-graph 2.0.0", @@ -6208,11 +6201,11 @@ name = "sp-api" version = "2.0.0" dependencies = [ "hash-db 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "sp-api-proc-macro 2.0.0", "sp-core 2.0.0", "sp-runtime 2.0.0", - "sp-state-machine 2.0.0", + "sp-state-machine 0.8.0", "sp-std 2.0.0", "sp-test-primitives 2.0.0", "sp-version 2.0.0", @@ -6234,13 +6227,13 @@ name = "sp-api-test" version = "2.0.0" dependencies = [ "criterion 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "rustversion 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-api 2.0.0", "sp-blockchain 2.0.0", "sp-consensus 0.8.0", "sp-runtime 2.0.0", - "sp-state-machine 2.0.0", + "sp-state-machine 0.8.0", "sp-version 2.0.0", "substrate-test-runtime-client 2.0.0", "trybuild 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", @@ -6250,7 +6243,7 @@ dependencies = [ name = "sp-application-crypto" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-io 2.0.0", @@ -6275,7 +6268,7 @@ dependencies = [ "criterion 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "integer-sqrt 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "num-traits 0.2.10 
(registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "primitive-types 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", @@ -6287,7 +6280,7 @@ dependencies = [ name = "sp-authority-discovery" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "sp-api 2.0.0", "sp-application-crypto 2.0.0", "sp-runtime 2.0.0", @@ -6298,7 +6291,7 @@ dependencies = [ name = "sp-authorship" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "sp-inherents 2.0.0", "sp-runtime 2.0.0", "sp-std 2.0.0", @@ -6308,7 +6301,7 @@ dependencies = [ name = "sp-block-builder" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "sp-api 2.0.0", "sp-inherents 2.0.0", "sp-runtime 2.0.0", @@ -6322,12 +6315,12 @@ dependencies = [ "derive_more 0.99.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "lru 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 
(registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-block-builder 2.0.0", "sp-consensus 0.8.0", "sp-runtime 2.0.0", - "sp-state-machine 2.0.0", + "sp-state-machine 0.8.0", ] [[package]] @@ -6339,12 +6332,12 @@ dependencies = [ "futures-timer 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "libp2p 0.14.0-alpha.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-inherents 2.0.0", "sp-runtime 2.0.0", - "sp-state-machine 2.0.0", + "sp-state-machine 0.8.0", "sp-std 2.0.0", "sp-test-primitives 2.0.0", "sp-version 2.0.0", @@ -6354,7 +6347,7 @@ dependencies = [ name = "sp-consensus-aura" version = "0.8.0" dependencies = [ - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "sp-api 2.0.0", "sp-application-crypto 2.0.0", "sp-inherents 2.0.0", @@ -6367,7 +6360,7 @@ dependencies = [ name = "sp-consensus-babe" version = "0.8.0" dependencies = [ - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "schnorrkel 0.8.5 (registry+https://github.com/rust-lang/crates.io-index)", "sp-api 2.0.0", "sp-application-crypto 2.0.0", @@ -6382,7 +6375,7 @@ dependencies = [ name = "sp-consensus-pow" version = "0.8.0" dependencies = [ - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + 
"parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "sp-api 2.0.0", "sp-core 2.0.0", "sp-runtime 2.0.0", @@ -6407,19 +6400,19 @@ dependencies = [ "libsecp256k1 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "num-traits 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "pretty_assertions 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "primitive-types 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-hex 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-hex 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "schnorrkel 0.8.5 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", "sha2 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-debug-derive 2.0.0", - "sp-externalities 2.0.0", + "sp-externalities 0.8.0", "sp-runtime-interface 2.0.0", "sp-serializer 2.0.0", "sp-std 2.0.0", @@ -6443,9 +6436,9 @@ dependencies = [ [[package]] name = "sp-externalities" -version = "2.0.0" +version = "0.8.0" dependencies = [ - "environmental 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "environmental 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "sp-std 2.0.0", 
"sp-storage 2.0.0", ] @@ -6454,7 +6447,7 @@ dependencies = [ name = "sp-finality-grandpa" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-api 2.0.0", "sp-application-crypto 2.0.0", @@ -6466,7 +6459,7 @@ dependencies = [ name = "sp-finality-tracker" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "sp-inherents 2.0.0", "sp-std 2.0.0", ] @@ -6476,7 +6469,7 @@ name = "sp-inherents" version = "2.0.0" dependencies = [ "derive_more 0.99.2 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-std 2.0.0", @@ -6489,11 +6482,11 @@ dependencies = [ "hash-db 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", "libsecp256k1 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", - "sp-externalities 2.0.0", + "sp-externalities 0.8.0", "sp-runtime-interface 2.0.0", - "sp-state-machine 2.0.0", + "sp-state-machine 0.8.0", "sp-std 2.0.0", "sp-trie 2.0.0", ] @@ -6551,7 +6544,7 @@ version = "2.0.0" dependencies = [ "impl-trait-for-tuples 0.1.3 
(registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "paste 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", @@ -6568,16 +6561,15 @@ dependencies = [ name = "sp-runtime-interface" version = "2.0.0" dependencies = [ - "environmental 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "primitive-types 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustversion 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", - "sp-externalities 2.0.0", + "sp-externalities 0.8.0", "sp-io 2.0.0", "sp-runtime-interface-proc-macro 2.0.0", "sp-runtime-interface-test-wasm 2.0.0", - "sp-state-machine 2.0.0", + "sp-state-machine 0.8.0", "sp-std 2.0.0", "sp-wasm-interface 2.0.0", "static_assertions 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -6599,12 +6591,12 @@ dependencies = [ name = "sp-runtime-interface-test" version = "2.0.0" dependencies = [ - "sc-executor 2.0.0", + "sc-executor 0.8.0", "sp-core 2.0.0", "sp-io 2.0.0", "sp-runtime-interface 2.0.0", "sp-runtime-interface-test-wasm 2.0.0", - "sp-state-machine 2.0.0", + "sp-state-machine 0.8.0", ] [[package]] @@ -6620,10 +6612,10 @@ dependencies = [ [[package]] name = "sp-sandbox" -version = "2.0.0" +version = "0.8.0" dependencies = [ "assert_matches 1.3.0 
(registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-io 2.0.0", "sp-std 2.0.0", @@ -6652,27 +6644,27 @@ dependencies = [ name = "sp-staking" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "sp-runtime 2.0.0", "sp-std 2.0.0", ] [[package]] name = "sp-state-machine" -version = "2.0.0" +version = "0.8.0" dependencies = [ "hash-db 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", "hex-literal 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "num-traits 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", - "sp-externalities 2.0.0", + "sp-externalities 0.8.0", "sp-panic-handler 2.0.0", "sp-trie 2.0.0", - "trie-db 0.18.1 (registry+https://github.com/rust-lang/crates.io-index)", + "trie-db 0.19.2 (registry+https://github.com/rust-lang/crates.io-index)", "trie-root 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -6694,7 +6686,7 @@ dependencies = [ name = "sp-test-primitives" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 
(registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-application-crypto 2.0.0", "sp-core 2.0.0", @@ -6706,7 +6698,7 @@ name = "sp-timestamp" version = "2.0.0" dependencies = [ "impl-trait-for-tuples 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "sp-api 2.0.0", "sp-inherents 2.0.0", "sp-runtime 2.0.0", @@ -6720,7 +6712,7 @@ dependencies = [ "derive_more 0.99.2 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-api 2.0.0", "sp-runtime 2.0.0", @@ -6733,12 +6725,12 @@ dependencies = [ "criterion 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "hash-db 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", "hex-literal 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "memory-db 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "memory-db 0.18.1 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "sp-core 2.0.0", "sp-std 2.0.0", - "trie-bench 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)", - "trie-db 0.18.1 
(registry+https://github.com/rust-lang/crates.io-index)", + "trie-bench 0.19.0 (registry+https://github.com/rust-lang/crates.io-index)", + "trie-db 0.19.2 (registry+https://github.com/rust-lang/crates.io-index)", "trie-root 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", "trie-standardmap 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -6748,7 +6740,7 @@ name = "sp-version" version = "2.0.0" dependencies = [ "impl-serde 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-runtime 2.0.0", "sp-std 2.0.0", @@ -6863,10 +6855,10 @@ dependencies = [ "node-runtime 2.0.0", "pallet-balances 2.0.0", "pallet-transaction-payment 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "rpassword 4.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-hex 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-hex 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "sc-rpc 2.0.0", "sp-core 2.0.0", "sp-runtime 2.0.0", @@ -6898,8 +6890,8 @@ dependencies = [ "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-client-transports 14.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 14.0.5 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sc-rpc-api 2.0.0", + 
"parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "sc-rpc-api 0.8.0", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-storage 2.0.0", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", @@ -6916,8 +6908,8 @@ dependencies = [ "jsonrpc-core-client 14.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-derive 14.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sc-client 2.0.0", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "sc-client 0.8.0", "sc-transaction-pool 2.0.0", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-api 2.0.0", @@ -6934,17 +6926,17 @@ version = "2.0.0" dependencies = [ "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "hash-db 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sc-client 2.0.0", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "sc-client 0.8.0", "sc-client-api 2.0.0", - "sc-client-db 2.0.0", - "sc-executor 2.0.0", + "sc-client-db 0.8.0", + "sc-executor 0.8.0", "sp-blockchain 2.0.0", "sp-consensus 0.8.0", "sp-core 2.0.0", "sp-keyring 2.0.0", "sp-runtime 2.0.0", - "sp-state-machine 2.0.0", + "sp-state-machine 0.8.0", ] [[package]] @@ -6957,12 +6949,12 @@ dependencies = [ "frame-system 2.0.0", "frame-system-rpc-runtime-api 2.0.0", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "memory-db 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)", + "memory-db 0.18.1 
(registry+https://github.com/rust-lang/crates.io-index)", "pallet-babe 2.0.0", "pallet-timestamp 2.0.0", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sc-client 2.0.0", - "sc-executor 2.0.0", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "sc-client 0.8.0", + "sc-executor 0.8.0", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "sp-api 2.0.0", "sp-application-crypto 2.0.0", @@ -6977,14 +6969,14 @@ dependencies = [ "sp-runtime 2.0.0", "sp-runtime-interface 2.0.0", "sp-session 2.0.0", - "sp-state-machine 2.0.0", + "sp-state-machine 0.8.0", "sp-std 2.0.0", "sp-transaction-pool 2.0.0", "sp-trie 2.0.0", "sp-version 2.0.0", "substrate-test-runtime-client 2.0.0", "substrate-wasm-builder-runner 1.0.4", - "trie-db 0.18.1 (registry+https://github.com/rust-lang/crates.io-index)", + "trie-db 0.19.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -6992,9 +6984,9 @@ name = "substrate-test-runtime-client" version = "2.0.0" dependencies = [ "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sc-block-builder 2.0.0", - "sc-client 2.0.0", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "sc-block-builder 0.8.0", + "sc-client 0.8.0", "sc-client-api 2.0.0", "sp-api 2.0.0", "sp-blockchain 2.0.0", @@ -7104,6 +7096,15 @@ name = "target_info" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "tempdir" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + 
[[package]] name = "tempfile" version = "3.1.0" @@ -7259,6 +7260,7 @@ dependencies = [ "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.11.1 (registry+https://github.com/rust-lang/crates.io-index)", "pin-project-lite 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -7523,28 +7525,29 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "trie-bench" -version = "0.18.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "criterion 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "hash-db 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", "keccak-hasher 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", - "memory-db 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "trie-db 0.18.1 (registry+https://github.com/rust-lang/crates.io-index)", + "memory-db 0.18.1 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "trie-db 0.19.2 (registry+https://github.com/rust-lang/crates.io-index)", "trie-root 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", "trie-standardmap 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "trie-db" -version = "0.18.1" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "hash-db 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", "hashbrown 0.6.3 
(registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-hex 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -7618,7 +7621,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "crunchy 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-hex 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-hex 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "static_assertions 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -8383,7 +8386,7 @@ dependencies = [ "checksum enumflags2_derive 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ecf634c5213044b8d54a46dd282cf5dd1f86bb5cb53e92c409cb4680a7fb9894" "checksum env_logger 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "aafcde04e90a5226a6443b7aabdb016ba2f8307c847d524724bd9b346dd1a2d3" "checksum env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" -"checksum environmental 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "34f8467a0284de039e6bd0e25c14519538462ba5beb548bb1f03e645097837a8" +"checksum environmental 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "516aa8d7a71cb00a1c4146f0798549b93d083d4f189b3ced8f3de6b8f11ee6c4" "checksum erased-serde 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "3beee4bc16478a1b26f2e80ad819a52d24745e292f521a63c16eea5f74b7eb60" "checksum errno 0.2.4 
(registry+https://github.com/rust-lang/crates.io-index)" = "c2a071601ed01b988f896ab14b95e67335d1eeb50190932a1320f7fe3cadc84e" "checksum errno-dragonfly 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "14ca354e36190500e1e1fb267c647932382b54053c50b14970856c0b00a35067" @@ -8399,7 +8402,7 @@ dependencies = [ "checksum fallible-iterator 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" "checksum fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b1ee15a7050e5580b3712877157068ea713b245b080ff302ae2ca973cfcd9baa" "checksum file-per-thread-logger 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8505b75b31ef7285168dd237c4a7db3c1f3e0927e7d314e670bc98e854272fe9" -"checksum finality-grandpa 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4106eb29c7e092f4a6ce6e7632abbbfdf85d94e63035d3790d2d16eeae83d3f4" +"checksum finality-grandpa 0.10.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2d9ad6bb0e42865b2d79fc9c8a08f22c39127310ed3334f2a1119ca25ed69dfb" "checksum fixed-hash 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "72fe7539e2c5692c6989f2f9c0457e42f1e5768f96b85c87d273574670ae459f" "checksum fixedbitset 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "86d4de0081402f5e88cdac65c8dcdcc73118c1a7a465e2a05f0da05843a8ea33" "checksum flate2 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)" = "6bd6d6f4752952feb71363cffc9ebac9411b75b87c6ab6058c40c8900cf43c0f" @@ -8538,7 +8541,7 @@ dependencies = [ "checksum maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" "checksum memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = 
"88579771288728879b57485cc7d6b07d648c9f0141eb955f8ab7f9d45394468e" "checksum memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "75189eb85871ea5c2e2c15abbdd541185f63b408415e5051f5cac122d8c774b9" -"checksum memory-db 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)" = "828bdf600636e90c56652689f7c3823ae2072104e4b0b5e83ea984f592f12ab9" +"checksum memory-db 0.18.1 (registry+https://github.com/rust-lang/crates.io-index)" = "881736a0f68a6fae1b596bb066c5bd16d7b3ed645a4dd8ffaefd02f585abaf71" "checksum memory_units 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" "checksum merlin 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2b0942b357c1b4d0dc43ba724674ec89c3218e6ca2b3e8269e7cb53bcecd2f6e" "checksum mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ba626b8a6de5da682e1caa06bdb42a335aee5a84db8e5046a3e8ab17ba0a3ae0" @@ -8578,7 +8581,7 @@ dependencies = [ "checksum parity-multiaddr 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6de20a133b50f5120c6b8284ee88c5017fb167149208b3ee2e95f8719a434dc4" "checksum parity-multihash 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "df3a17dc27848fd99e4f87eb0f8c9baba6ede0a6d555400c850ca45254ef4ce3" "checksum parity-multihash 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "70a4d7b05e51bff5ae2c29c7b8c3d889985bbd8f4e15b3542fcc1f6f9666d292" -"checksum parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f9f9d99dae413590a5f37e43cd99b94d4e62a244160562899126913ea7108673" +"checksum parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f747c06d9f3b2ad387ac881b9667298c81b1243aa9833f086e05996937c35507" "checksum parity-scale-codec-derive 1.1.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "34e513ff3e406f3ede6796dcdc83d0b32ffb86668cea1ccf7363118abeb00476" "checksum parity-send-wrapper 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aa9777aa91b8ad9dd5aaa04a9b6bcb02c7f1deb952fca5a66034d5e63afc5c6f" "checksum parity-util-mem 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8174d85e62c4d615fddd1ef67966bdc5757528891d0742f15b131ad04667b3f9" @@ -8665,7 +8668,7 @@ dependencies = [ "checksum rpassword 4.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d59f0e97173c514b9036cd450c195a6483ba81055c6fa0f1bff3ab563f47d44a" "checksum rust-argon2 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4ca4eaef519b494d1f2848fc602d18816fed808a981aedf4f1f00ceb7c9d32cf" "checksum rustc-demangle 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" -"checksum rustc-hex 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "403bb3a286107a04825a5f82e1270acc1e14028d3d554d7a1e08914549575ab8" +"checksum rustc-hex 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" "checksum rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "c5f5376ea5e30ce23c03eb77cbe4962b988deead10910c372b226388b594c084" "checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" "checksum rustls 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b25a18b1bf7387f0145e7f8324e700805aade3842dd3db2e74e4cdeb4677c09e" @@ -8731,6 +8734,7 @@ dependencies = [ "checksum take_mut 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60" 
"checksum target-lexicon 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6f4c118a7a38378f305a9e111fcb2f7f838c0be324bfb31a77ea04f7f6e684b4" "checksum target_info 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c63f48baada5c52e65a29eef93ab4f8982681b67f9e8d29c7b05abcfec2b9ffe" +"checksum tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8" "checksum tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" "checksum termcolor 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "96d6098003bde162e4277c70665bd87c326f5a0c3f3fbfb285787fa482d54e6e" "checksum test-case 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a605baa797821796a751f4a959e1206079b24a4b7e1ed302b7d785d81a9276c9" @@ -8770,8 +8774,8 @@ dependencies = [ "checksum tracing-attributes 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a4263b12c3d3c403274493eb805966093b53214124796552d674ca1dd5d27c2b" "checksum tracing-core 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "bc913647c520c959b6d21e35ed8fa6984971deca9f0a2fcb8c51207e0c56af1d" "checksum traitobject 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "efd1f82c56340fdf16f2a953d7bda4f8fdffba13d93b00844c25572110b26079" -"checksum trie-bench 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4d276e600d06806a4ac61e5d6b145a620001e6147151e60a4f1f46a784ec4baa" -"checksum trie-db 0.18.1 (registry+https://github.com/rust-lang/crates.io-index)" = "191fda5d0106f3ed35a8c6875428b213e15c516e48129cc263dd7ad16e9a665f" +"checksum trie-bench 0.19.0 (registry+https://github.com/rust-lang/crates.io-index)" = "26fd042d57ee9c987c562811162a78db78b5340ab674ac76056c85dca49b26bc" +"checksum trie-db 0.19.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "a0d747ae5b6f078df7e46477fcc7df66df9eb4f27a031cf4a7c890a8dd03d8e6" "checksum trie-root 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)" = "0b779f7c1c8fe9276365d9d5be5c4b5adeacf545117bb3f64c974305789c5c0b" "checksum trie-standardmap 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3161ba520ab28cd8e6b68e1126f1009f6e335339d1a73b978139011703264c8" "checksum try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" diff --git a/bin/node-template/Cargo.toml b/bin/node-template/Cargo.toml index aaaae647cf564..f956bbb22193a 100644 --- a/bin/node-template/Cargo.toml +++ b/bin/node-template/Cargo.toml @@ -11,18 +11,17 @@ path = "src/main.rs" [dependencies] futures = "0.3.1" -futures01 = { package = "futures", version = "0.1.29" } ctrlc = { version = "3.1.3", features = ["termination"] } log = "0.4.8" -tokio = "0.1.22" +tokio = { version = "0.2", features = ["rt-threaded"] } parking_lot = "0.9.0" codec = { package = "parity-scale-codec", version = "1.0.0" } trie-root = "0.15.2" sp-io = { version = "2.0.0", path = "../../primitives/io" } -sc-cli = { version = "2.0.0", path = "../../client/cli" } +sc-cli = { version = "0.8.0", path = "../../client/cli" } sp-core = { version = "2.0.0", path = "../../primitives/core" } -sc-executor = { version = "2.0.0", path = "../../client/executor" } -sc-service = { version = "2.0.0", path = "../../client/service" } +sc-executor = { version = "0.8", path = "../../client/executor" } +sc-service = { version = "0.8", path = "../../client/service" } sp-inherents = { version = "2.0.0", path = "../../primitives/inherents" } sc-transaction-pool = { version = "2.0.0", path = "../../client/transaction-pool" } sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } @@ -30,12 +29,12 @@ sc-network = { version = "0.8", path 
= "../../client/network" } sc-consensus-aura = { version = "0.8", path = "../../client/consensus/aura" } sp-consensus-aura = { version = "0.8", path = "../../primitives/consensus/aura" } sp-consensus = { version = "0.8", path = "../../primitives/consensus/common" } -grandpa = { version = "2.0.0", package = "sc-finality-grandpa", path = "../../client/finality-grandpa" } +grandpa = { version = "0.8", package = "sc-finality-grandpa", path = "../../client/finality-grandpa" } grandpa-primitives = { version = "2.0.0", package = "sp-finality-grandpa", path = "../../primitives/finality-grandpa" } -sc-client = { version = "2.0.0", path = "../../client/" } +sc-client = { version = "0.8", path = "../../client/" } node-template-runtime = { version = "2.0.0", path = "runtime" } sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sc-basic-authority = { path = "../../client/basic-authorship" } +sc-basic-authorship = { path = "../../client/basic-authorship" } [build-dependencies] vergen = "3.0.4" diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index b61ad25f466a4..e048d8baf7bfe 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -18,7 +18,6 @@ transaction-payment = { version = "2.0.0", default-features = false, package = " codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } frame-executive = { version = "2.0.0", default-features = false, path = "../../../frame/executive" } -safe-mix = { version = "1.0.0", default-features = false } serde = { version = "1.0.101", optional = true, features = ["derive"] } sp-api = { version = "2.0.0", default-features = false, path = "../../../primitives/api" } sp-block-builder = { path = "../../../primitives/block-builder", default-features = false} @@ -47,7 +46,6 @@ std = [ "grandpa/std", "indices/std", "randomness-collective-flip/std", - "safe-mix/std", "serde", "sp-api/std", 
"sp-block-builder/std", diff --git a/bin/node-template/src/cli.rs b/bin/node-template/src/cli.rs index 16638c4af955d..44764e5c9db41 100644 --- a/bin/node-template/src/cli.rs +++ b/bin/node-template/src/cli.rs @@ -1,5 +1,5 @@ use crate::service; -use futures::{future::{select, Map}, FutureExt, TryFutureExt, channel::oneshot, compat::Future01CompatExt}; +use futures::{future::{select, Map, Either}, FutureExt, channel::oneshot}; use std::cell::RefCell; use tokio::runtime::Runtime; pub use sc_cli::{VersionInfo, IntoExit, error}; @@ -75,36 +75,23 @@ where let informant = informant::build(&service); - let future = select(exit, informant) - .map(|_| Ok(())) - .compat(); - - runtime.executor().spawn(future); + let handle = runtime.spawn(select(exit, informant)); // we eagerly drop the service so that the internal exit future is fired, // but we need to keep holding a reference to the global telemetry guard let _telemetry = service.telemetry(); - let service_res = { - let exit = e.into_exit(); - let service = service - .map_err(|err| error::Error::Service(err)) - .compat(); - let select = select(service, exit) - .map(|_| Ok(())) - .compat(); - runtime.block_on(select) - }; + let exit = e.into_exit(); + let service_res = runtime.block_on(select(service, exit)); let _ = exit_send.send(()); - // TODO [andre]: timeout this future #1318 - - use futures01::Future; + runtime.block_on(handle); - let _ = runtime.shutdown_on_idle().wait(); - - service_res + match service_res { + Either::Left((res, _)) => res.map_err(error::Error::Service), + Either::Right((_, _)) => Ok(()) + } } // handles ctrl-c diff --git a/bin/node-template/src/service.rs b/bin/node-template/src/service.rs index 92db95b5c7d89..ed2299e30f73e 100644 --- a/bin/node-template/src/service.rs +++ b/bin/node-template/src/service.rs @@ -11,7 +11,7 @@ use sc_executor::native_executor_instance; pub use sc_executor::NativeExecutor; use sp_consensus_aura::sr25519::{AuthorityPair as AuraPair}; use grandpa::{self, 
FinalityProofProvider as GrandpaFinalityProofProvider}; -use sc_basic_authority; +use futures::{FutureExt, compat::Future01CompatExt}; // Our native executor instance. native_executor_instance!( @@ -106,7 +106,7 @@ pub fn new_full(config: Configuration(config: Configuration { // start the full GRANDPA voter @@ -180,7 +180,7 @@ pub fn new_full(config: Configuration { grandpa::setup_disabled_grandpa( diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 1f7c90bc3a6c5..c81cf38a9c2ff 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -25,7 +25,6 @@ crate-type = ["cdylib", "rlib"] # third-party dependencies codec = { package = "parity-scale-codec", version = "1.0.6" } serde = { version = "1.0.102", features = ["derive"] } -futures01 = { package = "futures", version = "0.1.29" } futures = { version = "0.3.1", features = ["compat"] } hex-literal = "0.2.1" jsonrpc-core = "14.0.3" @@ -48,20 +47,20 @@ sp-consensus = { version = "0.8", path = "../../../primitives/consensus/common" # client dependencies sc-client-api = { version = "2.0.0", path = "../../../client/api" } -sc-client = { version = "2.0.0", path = "../../../client/" } +sc-client = { version = "0.8", path = "../../../client/" } sc-chain-spec = { version = "2.0.0", path = "../../../client/chain-spec" } sc-transaction-pool = { version = "2.0.0", path = "../../../client/transaction-pool" } sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } sc-network = { version = "0.8", path = "../../../client/network" } sc-consensus-babe = { version = "0.8", path = "../../../client/consensus/babe" } -grandpa = { version = "2.0.0", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } -sc-client-db = { version = "2.0.0", default-features = false, path = "../../../client/db" } +grandpa = { version = "0.8", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } +sc-client-db = { version = "0.8", default-features = 
false, path = "../../../client/db" } sc-offchain = { version = "2.0.0", path = "../../../client/offchain" } sc-rpc = { version = "2.0.0", path = "../../../client/rpc" } -sc-basic-authority = { version = "2.0.0", path = "../../../client/basic-authorship" } -sc-service = { version = "2.0.0", default-features = false, path = "../../../client/service" } +sc-basic-authorship = { version = "0.8", path = "../../../client/basic-authorship" } +sc-service = { version = "0.8", default-features = false, path = "../../../client/service" } sc-telemetry = { version = "2.0.0", path = "../../../client/telemetry" } -sc-authority-discovery = { version = "2.0.0", path = "../../../client/authority-discovery" } +sc-authority-discovery = { version = "0.8", path = "../../../client/authority-discovery" } # frame dependencies pallet-indices = { version = "2.0.0", path = "../../../frame/indices" } @@ -81,10 +80,10 @@ node-primitives = { version = "2.0.0", path = "../primitives" } node-executor = { version = "2.0.0", path = "../executor" } # CLI-specific dependencies -tokio = { version = "0.1.22", optional = true } -sc-cli = { version = "2.0.0", optional = true, path = "../../../client/cli" } +tokio = { version = "0.2", features = ["rt-threaded"], optional = true } +sc-cli = { version = "0.8.0", optional = true, path = "../../../client/cli" } ctrlc = { version = "3.1.3", features = ["termination"], optional = true } -node-transaction-factory = { version = "2.0.0", optional = true, path = "../transaction-factory" } +node-transaction-factory = { version = "0.8.0", optional = true, path = "../transaction-factory" } # WASM-specific dependencies wasm-bindgen = { version = "0.2.57", optional = true } @@ -99,7 +98,7 @@ futures = "0.3.1" tempfile = "3.1.0" [build-dependencies] -sc-cli = { version = "2.0.0", package = "sc-cli", path = "../../../client/cli" } +sc-cli = { version = "0.8.0", package = "sc-cli", path = "../../../client/cli" } build-script-utils = { version = "2.0.0", package = 
"substrate-build-script-utils", path = "../../../utils/build-script-utils" } structopt = "=0.3.7" vergen = "3.0.4" diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs index 26b1f6d2949e3..b8ff948b01b24 100644 --- a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -21,8 +21,8 @@ use sp_core::{Pair, Public, crypto::UncheckedInto, sr25519}; use serde::{Serialize, Deserialize}; use node_runtime::{ AuthorityDiscoveryConfig, BabeConfig, BalancesConfig, ContractsConfig, CouncilConfig, DemocracyConfig, - GrandpaConfig, ImOnlineConfig, IndicesConfig, SessionConfig, SessionKeys, StakerStatus, StakingConfig, SudoConfig, - SystemConfig, TechnicalCommitteeConfig, WASM_BINARY, + GrandpaConfig, ImOnlineConfig, IndicesConfig, SessionConfig, SessionKeys, StakerStatus, StakingConfig, + SocietyConfig, SudoConfig, SystemConfig, TechnicalCommitteeConfig, WASM_BINARY, }; use node_runtime::Block; use node_runtime::constants::currency::*; @@ -295,6 +295,11 @@ pub fn testnet_genesis( }), pallet_membership_Instance1: Some(Default::default()), pallet_treasury: Some(Default::default()), + pallet_society: Some(SocietyConfig { + members: endowed_accounts[0..3].to_vec(), + pot: 0, + max_members: 999, + }) } } diff --git a/bin/node/cli/src/cli.rs b/bin/node/cli/src/cli.rs index 8fb95bed687bb..dcc5d39dbb7e7 100644 --- a/bin/node/cli/src/cli.rs +++ b/bin/node/cli/src/cli.rs @@ -15,7 +15,6 @@ // along with Substrate. If not, see . 
pub use sc_cli::VersionInfo; -use tokio::prelude::Future; use tokio::runtime::{Builder as RuntimeBuilder, Runtime}; use sc_cli::{IntoExit, NoCustom, SharedParams, ImportParams, error}; use sc_service::{AbstractService, Roles as ServiceRoles, Configuration}; @@ -25,6 +24,7 @@ use sc_cli::{display_role, parse_and_prepare, GetSharedParams, ParseAndPrepare}; use crate::{service, ChainSpec, load_spec}; use crate::factory_impl::FactoryState; use node_transaction_factory::RuntimeAdapter; +use futures::{channel::oneshot, future::{select, Either}}; /// Custom subcommands. #[derive(Clone, Debug, StructOpt)] @@ -105,7 +105,10 @@ pub fn run(args: I, exit: E, version: sc_cli::VersionInfo) -> error::Re info!("Chain specification: {}", config.chain_spec.name()); info!("Node name: {}", config.name); info!("Roles: {}", display_role(&config)); - let runtime = RuntimeBuilder::new().name_prefix("main-tokio-").build() + let runtime = RuntimeBuilder::new() + .thread_name("main-tokio-") + .threaded_scheduler() + .build() .map_err(|e| format!("{:?}", e))?; match config.roles { ServiceRoles::LIGHT => run_until_exit( @@ -135,8 +138,8 @@ pub fn run(args: I, exit: E, version: sc_cli::VersionInfo) -> error::Re load_spec, &cli_args.shared_params, &version, + None, )?; - sc_cli::fill_import_params(&mut config, &cli_args.import_params, ServiceRoles::FULL)?; match ChainSpec::from(config.chain_spec.id()) { @@ -172,37 +175,25 @@ where T: AbstractService, E: IntoExit, { - use futures::{FutureExt, TryFutureExt, channel::oneshot, future::select, compat::Future01CompatExt}; - let (exit_send, exit) = oneshot::channel(); let informant = sc_cli::informant::build(&service); - let future = select(informant, exit) - .map(|_| Ok(())) - .compat(); - - runtime.executor().spawn(future); + let handle = runtime.spawn(select(exit, informant)); // we eagerly drop the service so that the internal exit future is fired, // but we need to keep holding a reference to the global telemetry guard let _telemetry = 
service.telemetry(); - let service_res = { - let exit = e.into_exit(); - let service = service - .map_err(|err| error::Error::Service(err)) - .compat(); - let select = select(service, exit) - .map(|_| Ok(())) - .compat(); - runtime.block_on(select) - }; + let exit = e.into_exit(); + let service_res = runtime.block_on(select(service, exit)); let _ = exit_send.send(()); - // TODO [andre]: timeout this future #1318 - let _ = runtime.shutdown_on_idle().wait(); + runtime.block_on(handle); - service_res + match service_res { + Either::Left((res, _)) => res.map_err(error::Error::Service), + Either::Right((_, _)) => Ok(()) + } } diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 97ecb7a38f2f7..b12dfcdc515f5 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -113,8 +113,8 @@ macro_rules! new_full_start { macro_rules! new_full { ($config:expr, $with_startup_data: expr) => {{ use futures::{ - stream::StreamExt, - future::{FutureExt, TryFutureExt}, + prelude::*, + compat::Future01CompatExt }; use sc_network::Event; @@ -151,7 +151,7 @@ macro_rules! new_full { ($with_startup_data)(&block_import, &babe_link); if participates_in_consensus { - let proposer = sc_basic_authority::ProposerFactory { + let proposer = sc_basic_authorship::ProposerFactory { client: service.client(), transaction_pool: service.transaction_pool(), }; @@ -191,9 +191,8 @@ macro_rules! new_full { service.keystore(), dht_event_stream, ); - let future01_authority_discovery = authority_discovery.map(|x| Ok(x)).compat(); - service.spawn_task(future01_authority_discovery); + service.spawn_task(authority_discovery); } // if the node isn't actively participating in consensus then it doesn't @@ -223,7 +222,7 @@ macro_rules! new_full { service.network(), service.on_exit(), service.spawn_task_handle(), - )?); + )?.compat().map(drop)); }, (true, false) => { // start the full GRANDPA voter @@ -239,7 +238,9 @@ macro_rules! 
new_full { }; // the GRANDPA voter task is considered infallible, i.e. // if it fails we take down the service with it. - service.spawn_essential_task(grandpa::run_grandpa_voter(grandpa_config)?); + service.spawn_essential_task( + grandpa::run_grandpa_voter(grandpa_config)?.compat().map(drop) + ); }, (_, true) => { grandpa::setup_disabled_grandpa( @@ -508,7 +509,7 @@ mod tests { let parent_id = BlockId::number(service.client().chain_info().best_number); let parent_header = service.client().header(&parent_id).unwrap().unwrap(); - let mut proposer_factory = sc_basic_authority::ProposerFactory { + let mut proposer_factory = sc_basic_authorship::ProposerFactory { client: service.client(), transaction_pool: service.transaction_pool(), }; @@ -534,13 +535,15 @@ mod tests { digest.push(::babe_pre_digest(babe_pre_digest)); - let mut proposer = proposer_factory.init(&parent_header).unwrap(); - let new_block = futures::executor::block_on(proposer.propose( - inherent_data, - digest, - std::time::Duration::from_secs(1), - RecordProof::Yes, - )).expect("Error making test block").block; + let new_block = futures::executor::block_on(async move { + let proposer = proposer_factory.init(&parent_header).await; + proposer.unwrap().propose( + inherent_data, + digest, + std::time::Duration::from_secs(1), + RecordProof::Yes, + ).await + }).expect("Error making test block").block; let (new_header, new_body) = new_block.deconstruct(); let pre_hash = new_header.hash(); diff --git a/bin/node/executor/Cargo.toml b/bin/node/executor/Cargo.toml index 3aa7ad780a8db..6e624e09d28dd 100644 --- a/bin/node/executor/Cargo.toml +++ b/bin/node/executor/Cargo.toml @@ -9,10 +9,10 @@ edition = "2018" codec = { package = "parity-scale-codec", version = "1.0.0" } node-primitives = { version = "2.0.0", path = "../primitives" } node-runtime = { version = "2.0.0", path = "../runtime" } -sc-executor = { version = "2.0.0", path = "../../../client/executor" } +sc-executor = { version = "0.8", path = 
"../../../client/executor" } sp-core = { version = "2.0.0", path = "../../../primitives/core" } sp-io = { version = "2.0.0", path = "../../../primitives/io" } -sp-state-machine = { version = "2.0.0", path = "../../../primitives/state-machine" } +sp-state-machine = { version = "0.8", path = "../../../primitives/state-machine" } sp-trie = { version = "2.0.0", path = "../../../primitives/trie" } trie-root = "0.15.2" diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml index 8c96542a8e6bd..174b2875166c2 100644 --- a/bin/node/rpc/Cargo.toml +++ b/bin/node/rpc/Cargo.toml @@ -5,13 +5,13 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] -sc-client = { version = "2.0.0", path = "../../../client/" } +sc-client = { version = "0.8", path = "../../../client/" } jsonrpc-core = "14.0.3" node-primitives = { version = "2.0.0", path = "../primitives" } node-runtime = { version = "2.0.0", path = "../runtime" } sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } sp-api = { version = "2.0.0", path = "../../../primitives/api" } -pallet-contracts-rpc = { version = "2.0.0", path = "../../../frame/contracts/rpc/" } +pallet-contracts-rpc = { version = "0.8.0", path = "../../../frame/contracts/rpc/" } pallet-transaction-payment-rpc = { version = "2.0.0", path = "../../../frame/transaction-payment/rpc/" } substrate-frame-rpc-system = { version = "2.0.0", path = "../../../utils/frame/rpc/system" } sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index a8f26d13cba40..27b1e9382f933 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -9,7 +9,6 @@ build = "build.rs" # third-party dependencies codec = { package = "parity-scale-codec", version = "1.0.6", default-features = false, features = ["derive"] } integer-sqrt = { version = "0.1.2" } -safe-mix = { version = "1.0", default-features = false } rustc-hex = { 
version = "2.0", optional = true } serde = { version = "1.0.102", optional = true } @@ -41,21 +40,23 @@ pallet-babe = { version = "2.0.0", default-features = false, path = "../../../fr pallet-balances = { version = "2.0.0", default-features = false, path = "../../../frame/balances" } pallet-collective = { version = "2.0.0", default-features = false, path = "../../../frame/collective" } pallet-contracts = { version = "2.0.0", default-features = false, path = "../../../frame/contracts" } -pallet-contracts-rpc-runtime-api = { version = "2.0.0", default-features = false, path = "../../../frame/contracts/rpc/runtime-api/" } +pallet-contracts-rpc-runtime-api = { version = "0.8.0", default-features = false, path = "../../../frame/contracts/rpc/runtime-api/" } pallet-democracy = { version = "2.0.0", default-features = false, path = "../../../frame/democracy" } pallet-elections-phragmen = { version = "2.0.0", default-features = false, path = "../../../frame/elections-phragmen" } pallet-finality-tracker = { version = "2.0.0", default-features = false, path = "../../../frame/finality-tracker" } pallet-grandpa = { version = "2.0.0", default-features = false, path = "../../../frame/grandpa" } pallet-im-online = { version = "2.0.0", default-features = false, path = "../../../frame/im-online" } pallet-indices = { version = "2.0.0", default-features = false, path = "../../../frame/indices" } +pallet-identity = { version = "2.0.0", default-features = false, path = "../../../frame/identity" } pallet-membership = { version = "2.0.0", default-features = false, path = "../../../frame/membership" } -pallet-nicks = { version = "2.0.0", default-features = false, path = "../../../frame/nicks" } pallet-offences = { version = "2.0.0", default-features = false, path = "../../../frame/offences" } pallet-randomness-collective-flip = { version = "2.0.0", default-features = false, path = "../../../frame/randomness-collective-flip" } +pallet-recovery = { version = "2.0.0", default-features = 
false, path = "../../../frame/recovery" } pallet-session = { version = "2.0.0", features = ["historical"], path = "../../../frame/session", default-features = false } pallet-staking = { version = "2.0.0", features = ["migrate"], path = "../../../frame/staking", default-features = false } pallet-staking-reward-curve = { version = "2.0.0", path = "../../../frame/staking/reward-curve" } pallet-sudo = { version = "2.0.0", default-features = false, path = "../../../frame/sudo" } +pallet-society = { version = "2.0.0", default-features = false, path = "../../../frame/society" } pallet-timestamp = { version = "2.0.0", default-features = false, path = "../../../frame/timestamp" } pallet-treasury = { version = "2.0.0", default-features = false, path = "../../../frame/treasury" } pallet-utility = { version = "2.0.0", default-features = false, path = "../../../frame/utility" } @@ -91,7 +92,7 @@ std = [ "pallet-indices/std", "sp-inherents/std", "pallet-membership/std", - "pallet-nicks/std", + "pallet-identity/std", "node-primitives/std", "sp-offchain/std", "pallet-offences/std", @@ -99,7 +100,6 @@ std = [ "pallet-randomness-collective-flip/std", "sp-std/std", "rustc-hex", - "safe-mix/std", "serde", "pallet-session/std", "sp-api/std", @@ -119,4 +119,6 @@ std = [ "sp-transaction-pool/std", "pallet-utility/std", "sp-version/std", + "pallet-society/std", + "pallet-recovery/std", ] diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 4266ad7459e65..d769be7b505d5 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -80,8 +80,8 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to equal spec_version. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 203, - impl_version: 203, + spec_version: 204, + impl_version: 204, apis: RUNTIME_API_VERSIONS, }; @@ -174,7 +174,7 @@ parameter_types! 
{ impl pallet_balances::Trait for Runtime { type Balance = Balance; type OnFreeBalanceZero = ((Staking, Contracts), Session); - type OnReapAccount = System; + type OnReapAccount = (System, Recovery); type OnNewAccount = Indices; type Event = Event; type DustRemoval = (); @@ -488,19 +488,22 @@ impl pallet_finality_tracker::Trait for Runtime { } parameter_types! { - pub const ReservationFee: Balance = 1 * DOLLARS; - pub const MinLength: usize = 3; - pub const MaxLength: usize = 16; + pub const BasicDeposit: Balance = 10 * DOLLARS; // 258 bytes on-chain + pub const FieldDeposit: Balance = 250 * CENTS; // 66 bytes on-chain + pub const SubAccountDeposit: Balance = 2 * DOLLARS; // 53 bytes on-chain + pub const MaximumSubAccounts: u32 = 100; } -impl pallet_nicks::Trait for Runtime { +impl pallet_identity::Trait for Runtime { type Event = Event; type Currency = Balances; - type ReservationFee = ReservationFee; type Slashed = Treasury; - type ForceOrigin = pallet_collective::EnsureMember; - type MinLength = MinLength; - type MaxLength = MaxLength; + type BasicDeposit = BasicDeposit; + type FieldDeposit = FieldDeposit; + type SubAccountDeposit = SubAccountDeposit; + type MaximumSubAccounts = MaximumSubAccounts; + type RegistrarOrigin = pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; + type ForceOrigin = pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; } impl frame_system::offchain::CreateTransaction for Runtime { @@ -543,6 +546,49 @@ impl frame_system::offchain::CreateTransaction for } } +parameter_types! 
{ + pub const ConfigDepositBase: Balance = 5 * DOLLARS; + pub const FriendDepositFactor: Balance = 50 * CENTS; + pub const MaxFriends: u16 = 9; + pub const RecoveryDeposit: Balance = 5 * DOLLARS; +} + +impl pallet_recovery::Trait for Runtime { + type Event = Event; + type Call = Call; + type Currency = Balances; + type ConfigDepositBase = ConfigDepositBase; + type FriendDepositFactor = FriendDepositFactor; + type MaxFriends = MaxFriends; + type RecoveryDeposit = RecoveryDeposit; +} + +parameter_types! { + pub const CandidateDeposit: Balance = 10 * DOLLARS; + pub const WrongSideDeduction: Balance = 2 * DOLLARS; + pub const MaxStrikes: u32 = 10; + pub const RotationPeriod: BlockNumber = 80 * HOURS; + pub const PeriodSpend: Balance = 500 * DOLLARS; + pub const MaxLockDuration: BlockNumber = 36 * 30 * DAYS; + pub const ChallengePeriod: BlockNumber = 7 * DAYS; +} + +impl pallet_society::Trait for Runtime { + type Event = Event; + type Currency = Balances; + type Randomness = RandomnessCollectiveFlip; + type CandidateDeposit = CandidateDeposit; + type WrongSideDeduction = WrongSideDeduction; + type MaxStrikes = MaxStrikes; + type PeriodSpend = PeriodSpend; + type MembershipChanged = (); + type RotationPeriod = RotationPeriod; + type MaxLockDuration = MaxLockDuration; + type FounderSetOrigin = pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; + type SuspensionJudgementOrigin = pallet_society::EnsureFounder; + type ChallengePeriod = ChallengePeriod; +} + construct_runtime!( pub enum Runtime where Block = Block, @@ -573,7 +619,9 @@ construct_runtime!( AuthorityDiscovery: pallet_authority_discovery::{Module, Call, Config}, Offences: pallet_offences::{Module, Call, Storage, Event}, RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Module, Call, Storage}, - Nicks: pallet_nicks::{Module, Call, Storage, Event}, + Identity: pallet_identity::{Module, Call, Storage, Event}, + Society: pallet_society::{Module, Call, Storage, Event, 
Config}, + Recovery: pallet_recovery::{Module, Call, Storage, Event}, } ); diff --git a/bin/node/testing/Cargo.toml b/bin/node/testing/Cargo.toml index 4449c7bd22d47..37e85452d5425 100644 --- a/bin/node/testing/Cargo.toml +++ b/bin/node/testing/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" [dependencies] pallet-balances = { version = "2.0.0", path = "../../../frame/balances" } -sc-client = { version = "2.0.0", path = "../../../client/" } +sc-client = { version = "0.8", path = "../../../client/" } codec = { package = "parity-scale-codec", version = "1.0.0" } pallet-contracts = { version = "2.0.0", path = "../../../frame/contracts" } pallet-grandpa = { version = "2.0.0", path = "../../../frame/grandpa" } @@ -20,9 +20,10 @@ sp-core = { version = "2.0.0", path = "../../../primitives/core" } sp-io = { version = "2.0.0", path = "../../../primitives/io" } frame-support = { version = "2.0.0", path = "../../../frame/support" } pallet-session = { version = "2.0.0", path = "../../../frame/session" } +pallet-society = { version = "2.0.0", path = "../../../frame/society" } sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } pallet-staking = { version = "2.0.0", path = "../../../frame/staking" } -sc-executor = { version = "2.0.0", path = "../../../client/executor" } +sc-executor = { version = "0.8", path = "../../../client/executor" } frame-system = { version = "2.0.0", path = "../../../frame/system" } substrate-test-client = { version = "2.0.0", path = "../../../test-utils/client" } pallet-timestamp = { version = "2.0.0", path = "../../../frame/timestamp" } diff --git a/bin/node/testing/src/genesis.rs b/bin/node/testing/src/genesis.rs index 44dd79a7f438a..183515f27274b 100644 --- a/bin/node/testing/src/genesis.rs +++ b/bin/node/testing/src/genesis.rs @@ -20,7 +20,7 @@ use crate::keyring::*; use sp_keyring::{Ed25519Keyring, Sr25519Keyring}; use node_runtime::{ GenesisConfig, BalancesConfig, SessionConfig, StakingConfig, SystemConfig, - GrandpaConfig, 
IndicesConfig, ContractsConfig, WASM_BINARY, + GrandpaConfig, IndicesConfig, ContractsConfig, SocietyConfig, WASM_BINARY, }; use node_runtime::constants::currency::*; use sp_core::ChangesTrieConfiguration; @@ -96,5 +96,10 @@ pub fn config(support_changes_trie: bool, code: Option<&[u8]>) -> GenesisConfig pallet_membership_Instance1: Some(Default::default()), pallet_sudo: Some(Default::default()), pallet_treasury: Some(Default::default()), + pallet_society: Some(SocietyConfig { + members: vec![alice(), bob()], + pot: 0, + max_members: 999, + }), } } diff --git a/bin/node/transaction-factory/Cargo.toml b/bin/node/transaction-factory/Cargo.toml index 8b10e10f41f81..f32a3c23e0e01 100644 --- a/bin/node/transaction-factory/Cargo.toml +++ b/bin/node/transaction-factory/Cargo.toml @@ -1,19 +1,19 @@ [package] name = "node-transaction-factory" -version = "2.0.0" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" [dependencies] sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } -sc-cli = { version = "2.0.0", path = "../../../client/cli" } +sc-cli = { version = "0.8.0", path = "../../../client/cli" } sc-client-api = { version = "2.0.0", path = "../../../client/api" } -sc-client = { version = "2.0.0", path = "../../../client" } +sc-client = { version = "0.8", path = "../../../client" } codec = { package = "parity-scale-codec", version = "1.0.0", features = ["derive"] } sp-consensus = { version = "0.8", path = "../../../primitives/consensus/common" } log = "0.4.8" sp-core = { version = "2.0.0", path = "../../../primitives/core" } sp-api = { version = "2.0.0", path = "../../../primitives/api" } sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sc-service = { version = "2.0.0", path = "../../../client/service" } +sc-service = { version = "0.8", path = "../../../client/service" } sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } diff --git a/client/Cargo.toml 
b/client/Cargo.toml index eaddd9f3dcfe6..7413fc23f9e2c 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -1,17 +1,17 @@ [package] name = "sc-client" -version = "2.0.0" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" [dependencies] -sc-block-builder = { version = "2.0.0", path = "block-builder" } +sc-block-builder = { version = "0.8", path = "block-builder" } sc-client-api = { version = "2.0.0", path = "api" } codec = { package = "parity-scale-codec", version = "1.0.0", features = ["derive"] } sp-consensus = { version = "0.8", path = "../primitives/consensus/common" } derive_more = { version = "0.99.2" } -sc-executor = { version = "2.0.0", path = "executor" } -sp-externalities = { version = "2.0.0", path = "../primitives/externalities" } +sc-executor = { version = "0.8", path = "executor" } +sp-externalities = { version = "0.8.0", path = "../primitives/externalities" } fnv = { version = "1.0.6" } futures = { version = "0.3.1", features = ["compat"] } hash-db = { version = "0.15.2" } @@ -27,7 +27,7 @@ sp-version = { version = "2.0.0", path = "../primitives/version" } sp-api = { version = "2.0.0", path = "../primitives/api" } sp-runtime = { version = "2.0.0", path = "../primitives/runtime" } sp-blockchain = { version = "2.0.0", path = "../primitives/blockchain" } -sp-state-machine = { version = "2.0.0", path = "../primitives/state-machine" } +sp-state-machine = { version = "0.8", path = "../primitives/state-machine" } sc-telemetry = { version = "2.0.0", path = "telemetry" } sp-trie = { version = "2.0.0", path = "../primitives/trie" } tracing = "0.1.10" diff --git a/client/api/Cargo.toml b/client/api/Cargo.toml index 36360a6fb8d20..651ee1434f7fc 100644 --- a/client/api/Cargo.toml +++ b/client/api/Cargo.toml @@ -8,8 +8,8 @@ edition = "2018" codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } sp-consensus = { version = "0.8", path = "../../primitives/consensus/common" } derive_more 
= { version = "0.99.2" } -sc-executor = { version = "2.0.0", path = "../executor" } -sp-externalities = { version = "2.0.0", path = "../../primitives/externalities" } +sc-executor = { version = "0.8", path = "../executor" } +sp-externalities = { version = "0.8.0", path = "../../primitives/externalities" } fnv = { version = "1.0.6" } futures = { version = "0.3.1" } hash-db = { version = "0.15.2", default-features = false } @@ -25,7 +25,7 @@ sp-std = { version = "2.0.0", default-features = false, path = "../../primitives sp-version = { version = "2.0.0", default-features = false, path = "../../primitives/version" } sp-api = { version = "2.0.0", path = "../../primitives/api" } sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-state-machine = { version = "2.0.0", path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.8", path = "../../primitives/state-machine" } sc-telemetry = { version = "2.0.0", path = "../telemetry" } sp-trie = { version = "2.0.0", path = "../../primitives/trie" } sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 292031a4cb9de..a389af5671b32 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -18,11 +18,14 @@ use std::sync::Arc; use std::collections::HashMap; -use sp_core::ChangesTrieConfiguration; +use sp_core::ChangesTrieConfigurationRange; use sp_core::offchain::OffchainStorage; use sp_runtime::{generic::BlockId, Justification, Storage}; use sp_runtime::traits::{Block as BlockT, NumberFor, HasherFor}; -use sp_state_machine::{ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieTransaction}; +use sp_state_machine::{ + ChangesTrieState, ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieTransaction, + StorageCollection, ChildStorageCollection, +}; use crate::{ blockchain::{ Backend as BlockchainBackend, well_known_cache_keys @@ -45,12 
+48,6 @@ pub type TransactionForSB = >>::Tra /// Extracts the transaction for the given backend. pub type TransactionFor = TransactionForSB, Block>; -/// In memory array of storage values. -pub type StorageCollection = Vec<(Vec, Option>)>; - -/// In memory arrays of storage values for multiple child tries. -pub type ChildStorageCollection = Vec<(Vec, StorageCollection)>; - /// Import operation summary. /// /// Contains information about the block that just got imported, @@ -248,8 +245,6 @@ pub trait Backend: AuxStore + Send + Sync { type Blockchain: BlockchainBackend; /// Associated state backend type. type State: StateBackend> + Send; - /// Changes trie storage. - type ChangesTrieStorage: PrunableStateChangesTrieStorage; /// Offchain workers local storage. type OffchainStorage: OffchainStorage; @@ -284,7 +279,7 @@ pub trait Backend: AuxStore + Send + Sync { fn usage_info(&self) -> Option; /// Returns reference to changes trie storage. - fn changes_trie_storage(&self) -> Option<&Self::ChangesTrieStorage>; + fn changes_trie_storage(&self) -> Option<&dyn PrunableStateChangesTrieStorage>; /// Returns a handle to offchain storage. fn offchain_storage(&self) -> Option; @@ -342,12 +337,16 @@ pub trait Backend: AuxStore + Send + Sync { pub trait PrunableStateChangesTrieStorage: StateChangesTrieStorage, NumberFor> { - /// Get number block of oldest, non-pruned changes trie. - fn oldest_changes_trie_block( - &self, - config: &ChangesTrieConfiguration, - best_finalized: NumberFor, - ) -> NumberFor; + /// Get reference to StateChangesTrieStorage. + fn storage(&self) -> &dyn StateChangesTrieStorage, NumberFor>; + /// Get configuration at given block. + fn configuration_at(&self, at: &BlockId) -> sp_blockchain::Result< + ChangesTrieConfigurationRange, Block::Hash> + >; + /// Get end block (inclusive) of oldest pruned max-level (or skewed) digest trie blocks range. + /// It is guaranteed that we have no any changes tries before (and including) this block. 
+ /// It is guaranteed that all existing changes tries after this block are not yet pruned (if created). + fn oldest_pruned_digest_range_end(&self) -> NumberFor; } /// Mark for all Backend implementations, that are making use of state data, stored locally. @@ -364,3 +363,20 @@ pub trait RemoteBackend: Backend { /// locally, or prepares request to fetch that data from remote node. fn remote_blockchain(&self) -> Arc>; } + +/// Return changes tries state at given block. +pub fn changes_tries_state_at_block<'a, Block: BlockT>( + block: &BlockId, + maybe_storage: Option<&'a dyn PrunableStateChangesTrieStorage>, +) -> sp_blockchain::Result, NumberFor>>> { + let storage = match maybe_storage { + Some(storage) => storage, + None => return Ok(None), + }; + + let config_range = storage.configuration_at(block)?; + match config_range.config { + Some(config) => Ok(Some(ChangesTrieState::new(config, config_range.zero.0, storage.storage()))), + None => Ok(None), + } +} diff --git a/client/api/src/light.rs b/client/api/src/light.rs index fb3aeeab7c733..fc9b6dc6fd28c 100644 --- a/client/api/src/light.rs +++ b/client/api/src/light.rs @@ -26,7 +26,7 @@ use sp_runtime::{ }, generic::BlockId }; -use sp_core::ChangesTrieConfiguration; +use sp_core::ChangesTrieConfigurationRange; use sp_state_machine::StorageProof; use sp_blockchain::{ HeaderMetadata, well_known_cache_keys, HeaderBackend, Cache as BlockchainCache, @@ -96,8 +96,8 @@ pub struct RemoteReadChildRequest { /// Remote key changes read request. #[derive(Clone, Debug, PartialEq, Eq)] pub struct RemoteChangesRequest { - /// Changes trie configuration. - pub changes_trie_config: ChangesTrieConfiguration, + /// All changes trie configurations that are valid within [first_block; last_block]. + pub changes_trie_configs: Vec>, /// Query changes from range of blocks, starting (and including) with this hash... pub first_block: (Header::Number, Header::Hash), /// ...ending (and including) with this hash. 
Should come after first_block and diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index 767be822d7858..7381e5add19a1 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-authority-discovery" -version = "2.0.0" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" diff --git a/client/authority-discovery/src/tests.rs b/client/authority-discovery/src/tests.rs index 07e1c943be638..77ed6a1d948e2 100644 --- a/client/authority-discovery/src/tests.rs +++ b/client/authority-discovery/src/tests.rs @@ -212,12 +212,10 @@ impl ApiExt for RuntimeApi { unimplemented!("Not required for testing!") } - fn into_storage_changes< - T: sp_api::ChangesTrieStorage, sp_api::NumberFor> - >( + fn into_storage_changes( &self, _: &Self::StateBackend, - _: Option<&T>, + _: Option<&sp_api::ChangesTrieState, sp_api::NumberFor>>, _: ::Hash, ) -> std::result::Result, String> where Self: Sized diff --git a/client/basic-authorship/Cargo.toml b/client/basic-authorship/Cargo.toml index 4cdef01afd46b..8fa0b9f5cc20e 100644 --- a/client/basic-authorship/Cargo.toml +++ b/client/basic-authorship/Cargo.toml @@ -1,6 +1,6 @@ [package] -name = "sc-basic-authority" -version = "2.0.0" +name = "sc-basic-authorship" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" @@ -12,13 +12,13 @@ sp-api = { version = "2.0.0", path = "../../primitives/api" } sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } sp-core = { version = "2.0.0", path = "../../primitives/core" } sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sc-client = { version = "2.0.0", path = "../" } +sc-client = { version = "0.8", path = "../" } sc-client-api = { version = "2.0.0", path = "../api" } sp-consensus = { version = "0.8", path = "../../primitives/consensus/common" } sp-inherents = { version = "2.0.0", path = 
"../../primitives/inherents" } sc-telemetry = { version = "2.0.0", path = "../telemetry" } sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } -sc-block-builder = { version = "2.0.0", path = "../block-builder" } +sc-block-builder = { version = "0.8", path = "../block-builder" } tokio-executor = { version = "0.2.0-alpha.6", features = ["blocking"] } [dev-dependencies] diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index 543428b073478..6c5b063020073 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -34,6 +34,7 @@ use sp_transaction_pool::{TransactionPool, InPoolTransaction}; use sc_telemetry::{telemetry, CONSENSUS_INFO}; use sc_block_builder::BlockBuilderApi; use sp_api::{ProvideRuntimeApi, ApiExt}; +use futures::prelude::*; /// Proposer factory. pub struct ProposerFactory where A: TransactionPool { @@ -59,7 +60,7 @@ impl ProposerFactory, A> &mut self, parent_header: &::Header, now: Box time::Instant + Send + Sync>, - ) -> Result, A>, sp_blockchain::Error> { + ) -> Proposer, A> { let parent_hash = parent_header.hash(); let id = BlockId::hash(parent_hash); @@ -77,7 +78,7 @@ impl ProposerFactory, A> }), }; - Ok(proposer) + proposer } } @@ -94,14 +95,15 @@ impl sp_consensus::Environment for BlockBuilderApi + ApiExt>, { + type CreateProposer = future::Ready>; type Proposer = Proposer, A>; type Error = sp_blockchain::Error; fn init( &mut self, parent_header: &::Header, - ) -> Result { - self.init_with_now(parent_header, Box::new(time::Instant::now)) + ) -> Self::CreateProposer { + future::ready(Ok(self.init_with_now(parent_header, Box::new(time::Instant::now)))) } } @@ -203,6 +205,7 @@ impl ProposerInner, let pending_iterator = self.transaction_pool.ready(); debug!("Attempting to push transactions from the pool."); + debug!("Pool status: {:?}", self.transaction_pool.status()); for pending_tx in 
pending_iterator { if (self.now)() > deadline { debug!( @@ -324,7 +327,7 @@ mod tests { *value = new; old }) - ).unwrap(); + ); // when let deadline = time::Duration::from_secs(3); @@ -359,7 +362,7 @@ mod tests { let mut proposer = proposer_factory.init_with_now( &client.header(&block_id).unwrap().unwrap(), Box::new(move || time::Instant::now()), - ).unwrap(); + ); let deadline = time::Duration::from_secs(9); let proposal = futures::executor::block_on( @@ -372,9 +375,12 @@ mod tests { api.execute_block(&block_id, proposal.block).unwrap(); let state = backend.state_at(block_id).unwrap(); - let changes_trie_storage = backend.changes_trie_storage(); + let changes_trie_state = backend::changes_tries_state_at_block( + &block_id, + backend.changes_trie_storage(), + ).unwrap(); - let storage_changes = api.into_storage_changes(&state, changes_trie_storage, genesis_hash) + let storage_changes = api.into_storage_changes(&state, changes_trie_state.as_ref(), genesis_hash) .unwrap(); assert_eq!( diff --git a/client/basic-authorship/src/lib.rs b/client/basic-authorship/src/lib.rs index cf77c8a3f341a..f62134ce42aa7 100644 --- a/client/basic-authorship/src/lib.rs +++ b/client/basic-authorship/src/lib.rs @@ -19,7 +19,7 @@ //! # Example //! //! ``` -//! # use sc_basic_authority::ProposerFactory; +//! # use sc_basic_authorship::ProposerFactory; //! # use sp_consensus::{Environment, Proposer, RecordProof}; //! # use sp_runtime::generic::BlockId; //! # use std::{sync::Arc, time::Duration}; @@ -34,9 +34,12 @@ //! }; //! //! // From this factory, we create a `Proposer`. -//! let mut proposer = proposer_factory.init( +//! let proposer = proposer_factory.init( //! &client.header(&BlockId::number(0)).unwrap().unwrap(), -//! ).unwrap(); +//! ); +//! +//! // The proposer is created asynchronously. +//! let mut proposer = futures::executor::block_on(proposer).unwrap(); //! //! // This `Proposer` allows us to create a block proposition. //! 
// The proposer will grab transactions from the transaction pool, and put them into the block. diff --git a/client/block-builder/Cargo.toml b/client/block-builder/Cargo.toml index 1dcc9b05f5eb9..29531c4288bed 100644 --- a/client/block-builder/Cargo.toml +++ b/client/block-builder/Cargo.toml @@ -1,11 +1,11 @@ [package] name = "sc-block-builder" -version = "2.0.0" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" [dependencies] -sp-state-machine = { version = "2.0.0", path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.8", path = "../../primitives/state-machine" } sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } sp-api = { version = "2.0.0", path = "../../primitives/api" } sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index f59f88f5ba237..9b403dead4415 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -191,14 +191,17 @@ where let proof = self.api.extract_proof(); let state = self.backend.state_at(self.block_id)?; - let changes_trie_storage = self.backend.changes_trie_storage(); + let changes_trie_state = backend::changes_tries_state_at_block( + &self.block_id, + self.backend.changes_trie_storage(), + )?; let parent_hash = self.parent_hash; // The unsafe is required because the consume requires that we drop/consume the inner api // (what we do here). 
let storage_changes = self.api.into_storage_changes( &state, - changes_trie_storage, + changes_trie_state.as_ref(), parent_hash, ); diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index 6e58083e8cc2e..81cbce5ea731c 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -70,7 +70,7 @@ impl GenesisSource { } } -impl<'a, G: RuntimeGenesis, E> BuildStorage for &'a ChainSpec { +impl BuildStorage for ChainSpec { fn build_storage(&self) -> Result { match self.genesis.resolve()? { Genesis::Runtime(gc) => gc.build_storage(), diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 05fc019d0a9d9..1c1d967c7d8d5 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-cli" -version = "2.0.0" +version = "0.8.0" authors = ["Parity Technologies "] description = "Substrate CLI interface." edition = "2018" @@ -16,8 +16,8 @@ time = "0.1.42" ansi_term = "0.12.1" lazy_static = "1.4.0" app_dirs = "1.2.1" -tokio = "0.2.1" -futures = { version = "0.3.1", features = ["compat"] } +tokio = "0.2" +futures = "0.3.1" fdlimit = "0.1.1" serde_json = "1.0.41" sp-panic-handler = { version = "2.0.0", path = "../../primitives/panic-handler" } @@ -26,10 +26,10 @@ sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } sc-network = { version = "0.8", path = "../network" } sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } sp-core = { version = "2.0.0", path = "../../primitives/core" } -sc-service = { version = "2.0.0", default-features = false, path = "../service" } -sp-state-machine = { version = "2.0.0", path = "../../primitives/state-machine" } +sc-service = { version = "0.8", default-features = false, path = "../service" } +sp-state-machine = { version = "0.8", path = "../../primitives/state-machine" } sc-telemetry = { version = "2.0.0", path = "../telemetry" } -prometheus-endpoint = { path = "../../utils/prometheus" } 
+prometheus-exporter = { path = "../../utils/prometheus" } sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } names = "0.11.0" structopt = "=0.3.7" diff --git a/client/cli/src/informant.rs b/client/cli/src/informant.rs index de7c376f09fee..312e4017d5ff0 100644 --- a/client/cli/src/informant.rs +++ b/client/cli/src/informant.rs @@ -17,7 +17,7 @@ //! Console informant. Prints sync progress and block events. Runs on the calling thread. use sc_client_api::BlockchainEvents; -use futures::{StreamExt, TryStreamExt, FutureExt, future, compat::Stream01CompatExt}; +use futures::prelude::*; use log::{info, warn, trace}; use sp_runtime::traits::Header; use sc_service::AbstractService; @@ -33,8 +33,7 @@ pub fn build(service: &impl AbstractService) -> impl futures::Future impl futures::Future = create_gauge( - "sync_target_number", - "block sync target number" - ); -} /// State of the informant display system. /// @@ -73,10 +63,7 @@ impl InformantDisplay { let (status, target) = match (net_status.sync_state, net_status.best_seen_block) { (SyncState::Idle, _) => ("Idle".into(), "".into()), (SyncState::Downloading, None) => (format!("Syncing{}", speed), "".into()), - (SyncState::Downloading, Some(n)) => { - SYNC_TARGET.set(n.unique_saturated_into() as u64); - (format!("Syncing{}", speed), format!(", target=#{}", n)) - } + (SyncState::Downloading, Some(n)) => (format!("Syncing{}", speed), format!(", target=#{}", n)), }; info!( diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index 96bf17999cf3e..79afaa03b4269 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -61,7 +61,7 @@ pub use traits::GetSharedParams; use app_dirs::{AppInfo, AppDataType}; use log::info; use lazy_static::lazy_static; -use futures::{Future, compat::Future01CompatExt, executor::block_on}; +use futures::{Future, executor::block_on}; use sc_telemetry::TelemetryEndpoints; use sp_runtime::generic::BlockId; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; @@ 
-142,8 +142,13 @@ pub fn load_spec(cli: &SharedParams, factory: F) -> error::Result PathBuf { +fn base_path( + cli: &SharedParams, + version: &VersionInfo, + default_base_path: Option, +) -> PathBuf { cli.base_path.clone() + .or(default_base_path) .unwrap_or_else(|| app_dirs::get_app_root( AppDataType::UserData, @@ -295,6 +300,78 @@ impl<'a, CC, RP> ParseAndPrepare<'a, CC, RP> where CC: GetSharedParams { } } +impl<'a, CC, RP> ParseAndPrepare<'a, CC, RP> { + /// Convert ParseAndPrepare to Configuration + pub fn into_configuration( + self, + spec_factory: S, + default_base_path: Option, + ) -> error::Result>> + where + C: Default, + G: RuntimeGenesis, + E: ChainSpecExtension, + S: FnOnce(&str) -> Result>, String>, + { + match self { + ParseAndPrepare::Run(c) => + Some(create_run_node_config( + c.params.left, + spec_factory, + c.impl_name, + c.version, + default_base_path, + )).transpose(), + ParseAndPrepare::BuildSpec(c) => { + let spec = load_spec(&c.params.shared_params, spec_factory)?; + + Some(create_build_spec_config( + &spec, + &c.params.shared_params, + c.version, + default_base_path, + )).transpose() + }, + ParseAndPrepare::ExportBlocks(c) => + Some(create_config_with_db_path( + spec_factory, + &c.params.shared_params, + c.version, + default_base_path, + )).transpose(), + ParseAndPrepare::ImportBlocks(c) => + Some(create_config_with_db_path( + spec_factory, + &c.params.shared_params, + c.version, + default_base_path, + )).transpose(), + ParseAndPrepare::CheckBlock(c) => + Some(create_config_with_db_path( + spec_factory, + &c.params.shared_params, + c.version, + default_base_path, + )).transpose(), + ParseAndPrepare::PurgeChain(c) => + Some(create_config_with_db_path( + spec_factory, + &c.params.shared_params, + c.version, + default_base_path, + )).transpose(), + ParseAndPrepare::RevertChain(c) => + Some(create_config_with_db_path( + spec_factory, + &c.params.shared_params, + c.version, + default_base_path, + )).transpose(), + ParseAndPrepare::CustomCommand(_) 
=> Ok(None), + } + } +} + /// Command ready to run the main client. pub struct ParseAndPrepareRun<'a, RP> { params: MergeParameters, @@ -321,7 +398,11 @@ impl<'a, RP> ParseAndPrepareRun<'a, RP> { RS: FnOnce(Exit, RunCmd, RP, Configuration) -> Result<(), E> { let config = create_run_node_config( - self.params.left.clone(), spec_factory, self.impl_name, self.version, + self.params.left.clone(), + spec_factory, + self.impl_name, + self.version, + None, )?; run_service(exit, self.params.left, self.params.right, config).map_err(Into::into) @@ -350,11 +431,12 @@ impl<'a> ParseAndPrepareBuildSpec<'a> { let mut spec = load_spec(&self.params.shared_params, spec_factory)?; if spec.boot_nodes().is_empty() && !self.params.disable_default_bootnode { - let base_path = base_path(&self.params.shared_params, self.version); - let cfg = sc_service::Configuration::::default_with_spec_and_base_path( - spec.clone(), - Some(base_path), - ); + let cfg = create_build_spec_config::( + &spec, + &self.params.shared_params, + self.version, + None, + )?; let node_key = node_key_config( self.params.node_key_params, &Some(cfg.in_chain_config_dir(DEFAULT_NETWORK_CONFIG_PATH).expect("We provided a base_path")) @@ -401,7 +483,13 @@ impl<'a> ParseAndPrepareExport<'a> { E: ChainSpecExtension, Exit: IntoExit { - let config = create_config_with_db_path(spec_factory, &self.params.shared_params, self.version)?; + let mut config = create_config_with_db_path( + spec_factory, + &self.params.shared_params, + self.version, + None, + )?; + fill_config_keystore_in_memory(&mut config)?; if let DatabaseConfig::Path { ref path, .. } = &config.database { info!("DB path: {}", path.display()); @@ -426,8 +514,7 @@ impl<'a> ParseAndPrepareExport<'a> { }); let mut export_fut = builder(config)? 
- .export_blocks(file, from.into(), to, json) - .compat(); + .export_blocks(file, from.into(), to, json); let fut = futures::future::poll_fn(|cx| { if exit_recv.try_recv().is_ok() { return Poll::Ready(Ok(())); @@ -463,7 +550,12 @@ impl<'a> ParseAndPrepareImport<'a> { E: ChainSpecExtension, Exit: IntoExit { - let mut config = create_config_with_db_path(spec_factory, &self.params.shared_params, self.version)?; + let mut config = create_config_with_db_path( + spec_factory, + &self.params.shared_params, + self.version, + None, + )?; fill_import_params(&mut config, &self.params.import_params, sc_service::Roles::FULL)?; let file: Box = match self.params.input { @@ -485,8 +577,7 @@ impl<'a> ParseAndPrepareImport<'a> { }); let mut import_fut = builder(config)? - .import_blocks(file, false) - .compat(); + .import_blocks(file, false); let fut = futures::future::poll_fn(|cx| { if exit_recv.try_recv().is_ok() { return Poll::Ready(Ok(())); @@ -523,8 +614,14 @@ impl<'a> CheckBlock<'a> { E: ChainSpecExtension, Exit: IntoExit { - let mut config = create_config_with_db_path(spec_factory, &self.params.shared_params, self.version)?; + let mut config = create_config_with_db_path( + spec_factory, + &self.params.shared_params, + self.version, + None, + )?; fill_import_params(&mut config, &self.params.import_params, sc_service::Roles::FULL)?; + fill_config_keystore_in_memory(&mut config)?; let input = if self.params.input.starts_with("0x") { &self.params.input[2..] } else { &self.params.input[..] }; let block_id = match FromStr::from_str(input) { @@ -537,8 +634,7 @@ impl<'a> CheckBlock<'a> { let start = std::time::Instant::now(); let check = builder(config)? 
- .check_block(block_id) - .compat(); + .check_block(block_id); let mut runtime = tokio::runtime::Runtime::new().unwrap(); runtime.block_on(check)?; println!("Completed in {} ms.", start.elapsed().as_millis()); @@ -562,9 +658,13 @@ impl<'a> ParseAndPreparePurge<'a> { G: RuntimeGenesis, E: ChainSpecExtension, { - let config = create_config_with_db_path::<(), _, _, _>( - spec_factory, &self.params.shared_params, self.version + let mut config = create_config_with_db_path::<(), _, _, _>( + spec_factory, + &self.params.shared_params, + self.version, + None, )?; + fill_config_keystore_in_memory(&mut config)?; let db_path = match config.database { DatabaseConfig::Path { path, .. } => path, _ => { @@ -626,9 +726,14 @@ impl<'a> ParseAndPrepareRevert<'a> { G: RuntimeGenesis, E: ChainSpecExtension, { - let config = create_config_with_db_path( - spec_factory, &self.params.shared_params, self.version + let mut config = create_config_with_db_path( + spec_factory, + &self.params.shared_params, + self.version, + None, )?; + fill_config_keystore_in_memory(&mut config)?; + let blocks = self.params.num.parse()?; builder(config)?.revert_chain(blocks)?; Ok(()) @@ -752,6 +857,16 @@ fn input_keystore_password() -> Result { .map_err(|e| format!("{:?}", e)) } +/// Use in memory keystore config when it is not required at all. +fn fill_config_keystore_in_memory(config: &mut sc_service::Configuration) + -> Result<(), String> +{ + match &mut config.keystore { + cfg @ KeystoreConfig::None => { *cfg = KeystoreConfig::InMemory; Ok(()) }, + _ => Err("Keystore config specified when it should not be!".into()), + } +} + /// Fill the password field of the given config instance. 
fn fill_config_keystore_password_and_path( config: &mut sc_service::Configuration, @@ -840,7 +955,11 @@ pub fn fill_import_params( } fn create_run_node_config( - cli: RunCmd, spec_factory: S, impl_name: &'static str, version: &VersionInfo, + cli: RunCmd, + spec_factory: S, + impl_name: &'static str, + version: &VersionInfo, + default_base_path: Option, ) -> error::Result> where C: Default, @@ -848,7 +967,12 @@ where E: ChainSpecExtension, S: FnOnce(&str) -> Result>, String>, { - let mut config = create_config_with_db_path(spec_factory, &cli.shared_params, &version)?; + let mut config = create_config_with_db_path( + spec_factory, + &cli.shared_params, + &version, + default_base_path, + )?; fill_config_keystore_password_and_path(&mut config, &cli)?; @@ -925,7 +1049,7 @@ where config.rpc_http = Some(parse_address(&format!("{}:{}", rpc_interface, 9933), cli.rpc_port)?); config.rpc_ws = Some(parse_address(&format!("{}:{}", ws_interface, 9944), cli.ws_port)?); config.prometheus_port = Some( - parse_address(&format!("{}:{}", prometheus_interface, 9955), cli.prometheus_port)? + parse_address(&format!("{}:{}", prometheus_interface, 9615), cli.prometheus_port)? ); config.rpc_ws_max_connections = cli.ws_max_connections; @@ -982,7 +1106,10 @@ fn interface_str( /// Creates a configuration including the database path. 
pub fn create_config_with_db_path( - spec_factory: S, cli: &SharedParams, version: &VersionInfo, + spec_factory: S, + cli: &SharedParams, + version: &VersionInfo, + default_base_path: Option, ) -> error::Result> where C: Default, @@ -991,7 +1118,7 @@ where S: FnOnce(&str) -> Result>, String>, { let spec = load_spec(cli, spec_factory)?; - let base_path = base_path(cli, version); + let base_path = base_path(cli, version, default_base_path); let mut config = sc_service::Configuration::default_with_spec_and_base_path( spec.clone(), @@ -1006,6 +1133,27 @@ where Ok(config) } +/// Creates a configuration including the base path and the shared params +fn create_build_spec_config( + spec: &ChainSpec, + cli: &SharedParams, + version: &VersionInfo, + default_base_path: Option, +) -> error::Result> +where + C: Default, + G: RuntimeGenesis, + E: ChainSpecExtension, +{ + let base_path = base_path(&cli, version, default_base_path); + let cfg = sc_service::Configuration::::default_with_spec_and_base_path( + spec.clone(), + Some(base_path), + ); + + Ok(cfg) +} + /// Internal trait used to cast to a dynamic type that implements Read and Seek. 
trait ReadPlusSeek: Read + Seek {} @@ -1243,6 +1391,7 @@ mod tests { |_| Ok(Some(chain_spec.clone())), "test", &version_info, + None, ).unwrap(); let expected_path = match keystore_path { @@ -1253,4 +1402,44 @@ mod tests { assert_eq!(expected_path, node_config.keystore.path().unwrap().to_owned()); } } + + #[test] + fn parse_and_prepare_into_configuration() { + let chain_spec = ChainSpec::from_genesis( + "test", + "test-id", + || (), + Vec::new(), + None, + None, + None, + None, + ); + let version = VersionInfo { + name: "test", + version: "42", + commit: "234234", + executable_name: "test", + description: "cool test", + author: "universe", + support_url: "com", + }; + let spec_factory = |_: &str| Ok(Some(chain_spec.clone())); + + let args = vec!["substrate", "--dev", "--state-cache-size=42"]; + let pnp = parse_and_prepare::(&version, "test", args); + let config = pnp.into_configuration::<(), _, _, _>(spec_factory, None).unwrap().unwrap(); + assert_eq!(config.roles, sc_service::Roles::AUTHORITY); + assert_eq!(config.state_cache_size, 42); + + let args = vec!["substrate", "import-blocks", "--dev"]; + let pnp = parse_and_prepare::(&version, "test", args); + let config = pnp.into_configuration::<(), _, _, _>(spec_factory, None).unwrap().unwrap(); + assert_eq!(config.roles, sc_service::Roles::FULL); + + let args = vec!["substrate", "--base-path=/foo"]; + let pnp = parse_and_prepare::(&version, "test", args); + let config = pnp.into_configuration::<(), _, _, _>(spec_factory, Some("/bar".into())).unwrap().unwrap(); + assert_eq!(config.config_dir, Some("/foo".into())); + } } diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml index ddae989b4170f..3d4c5a9157bef 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -9,13 +9,12 @@ edition = "2018" sp-application-crypto = { version = "2.0.0", path = "../../../primitives/application-crypto" } sp-consensus-aura = { version = "0.8", path = 
"../../../primitives/consensus/aura" } sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } -sc-client = { version = "2.0.0", path = "../../" } +sc-client = { version = "0.8", path = "../../" } sc-client-api = { version = "2.0.0", path = "../../api" } codec = { package = "parity-scale-codec", version = "1.0.0" } sp-consensus = { version = "0.8", path = "../../../primitives/consensus/common" } derive_more = "0.99.2" -futures = { version = "0.3.1", features = ["compat"] } -futures01 = { package = "futures", version = "0.1" } +futures = "0.3.1" futures-timer = "0.4.0" sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } sc-keystore = { version = "2.0.0", path = "../../keystore" } @@ -33,11 +32,12 @@ sc-telemetry = { version = "2.0.0", path = "../../telemetry" } [dev-dependencies] sp-keyring = { version = "2.0.0", path = "../../../primitives/keyring" } -sc-executor = { version = "2.0.0", path = "../../executor" } +sc-executor = { version = "0.8", path = "../../executor" } sc-network = { version = "0.8", path = "../../network" } -sc-network-test = { version = "2.0.0", path = "../../network/test" } -sc-service = { version = "2.0.0", path = "../../service" } +sc-network-test = { version = "0.8.0", path = "../../network/test" } +sc-service = { version = "0.8", path = "../../service" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } tokio = "0.1.22" env_logger = "0.7.0" tempfile = "3.1.0" +futures01 = { package = "futures", version = "0.1" } diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 13a4c5a777144..95dc08afaf5ef 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -153,7 +153,7 @@ pub fn start_aura( force_authoring: bool, keystore: KeyStorePtr, can_author_with: CAW, -) -> Result, sp_consensus::Error> where +) -> Result, sp_consensus::Error> where B: BlockT, C: ProvideRuntimeApi + BlockOf + 
ProvideCache + AuxStore + Send + Sync, C::Api: AuraApi>, @@ -189,7 +189,7 @@ pub fn start_aura( inherent_data_providers, AuraSlotCompatible, can_author_with, - ).map(|()| Ok::<(), ()>(())).compat()) + )) } struct AuraWorker { @@ -217,6 +217,9 @@ impl sc_consensus_slots::SimpleSlotWorker for AuraW { type BlockImport = I; type SyncOracle = SO; + type CreateProposer = Pin> + Send + 'static + >>; type Proposer = E::Proposer; type Claim = P; type EpochData = Vec>; @@ -302,10 +305,10 @@ impl sc_consensus_slots::SimpleSlotWorker for AuraW &mut self.sync_oracle } - fn proposer(&mut self, block: &B::Header) -> Result { - self.env.init(block).map_err(|e| { + fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { + Box::pin(self.env.init(block).map_err(|e| { sp_consensus::Error::ClientImport(format!("{:?}", e)).into() - }) + })) } fn proposing_remaining_duration( @@ -672,19 +675,21 @@ fn initialize_authorities_cache(client: &C) -> Result<(), ConsensusErro }; // check if we already have initialized the cache + let map_err = |error| sp_consensus::Error::from(sp_consensus::Error::ClientImport( + format!( + "Error initializing authorities cache: {}", + error, + ))); + let genesis_id = BlockId::Number(Zero::zero()); let genesis_authorities: Option> = cache .get_at(&well_known_cache_keys::AUTHORITIES, &genesis_id) + .unwrap_or(None) .and_then(|(_, _, v)| Decode::decode(&mut &v[..]).ok()); if genesis_authorities.is_some() { return Ok(()); } - let map_err = |error| sp_consensus::Error::from(sp_consensus::Error::ClientImport( - format!( - "Error initializing authorities cache: {}", - error, - ))); let genesis_authorities = authorities(client, &genesis_id)?; cache.initialize(&well_known_cache_keys::AUTHORITIES, genesis_authorities.encode()) .map_err(map_err)?; @@ -703,6 +708,7 @@ fn authorities(client: &C, at: &BlockId) -> Result, Consensus .cache() .and_then(|cache| cache .get_at(&well_known_cache_keys::AUTHORITIES, at) + .unwrap_or(None) .and_then(|(_, _, v)| 
Decode::decode(&mut &v[..]).ok()) ) .or_else(|| AuraApi::authorities(&*client.runtime_api(), at).ok()) @@ -874,12 +880,13 @@ mod tests { impl Environment for DummyFactory { type Proposer = DummyProposer; + type CreateProposer = futures::future::Ready>; type Error = Error; fn init(&mut self, parent_header: &::Header) - -> Result + -> Self::CreateProposer { - Ok(DummyProposer(parent_header.number + 1, self.0.clone())) + futures::future::ready(Ok(DummyProposer(parent_header.number + 1, self.0.clone()))) } } @@ -1019,7 +1026,10 @@ mod tests { false, keystore, sp_consensus::AlwaysCanAuthor, - ).expect("Starts aura"); + ) + .expect("Starts aura") + .unit_error() + .compat(); runtime.spawn(aura); } @@ -1030,7 +1040,7 @@ mod tests { })); runtime.block_on(future::join_all(import_notifications) - .map(|_| Ok::<(), ()>(())).compat()).unwrap(); + .unit_error().compat()).unwrap(); } #[test] diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index eb2875b3d37ff..55b0e0cf20217 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -20,7 +20,7 @@ sp-timestamp = { version = "2.0.0", path = "../../../primitives/timestamp" } sc-telemetry = { version = "2.0.0", path = "../../telemetry" } sc-keystore = { version = "2.0.0", path = "../../keystore" } sc-client-api = { version = "2.0.0", path = "../../api" } -sc-client = { version = "2.0.0", path = "../../" } +sc-client = { version = "0.8", path = "../../" } sp-api = { version = "2.0.0", path = "../../../primitives/api" } sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } @@ -29,8 +29,7 @@ sc-consensus-uncles = { version = "0.8", path = "../uncles" } sc-consensus-slots = { version = "0.8", path = "../slots" } sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } fork-tree = { version = "2.0.0", path = "../../../utils/fork-tree" } -futures = { 
version = "0.3.1", features = ["compat"] } -futures01 = { package = "futures", version = "0.1" } +futures = "0.3.1" futures-timer = "0.4.0" parking_lot = "0.9.0" log = "0.4.8" @@ -42,15 +41,16 @@ derive_more = "0.99.2" [dev-dependencies] sp-keyring = { version = "2.0.0", path = "../../../primitives/keyring" } -sc-executor = { version = "2.0.0", path = "../../executor" } +sc-executor = { version = "0.8", path = "../../executor" } sc-network = { version = "0.8", path = "../../network" } -sc-network-test = { version = "2.0.0", path = "../../network/test" } -sc-service = { version = "2.0.0", path = "../../service" } +sc-network-test = { version = "0.8.0", path = "../../network/test" } +sc-service = { version = "0.8", path = "../../service" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } -sc-block-builder = { version = "2.0.0", path = "../../block-builder" } +sc-block-builder = { version = "0.8", path = "../../block-builder" } tokio = "0.1.22" env_logger = "0.7.0" tempfile = "3.1.0" +futures01 = { package = "futures", version = "0.1" } [features] test-helpers = [] diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index bbf19b706601f..770f8a4eec6e2 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -289,7 +289,7 @@ pub fn start_babe(BabeParams { babe_link, can_author_with, }: BabeParams) -> Result< - impl futures01::Future, + impl futures::Future, sp_consensus::Error, > where B: BlockT, @@ -325,7 +325,7 @@ pub fn start_babe(BabeParams { )?; babe_info!("Starting BABE Authorship worker"); - let slot_worker = sc_consensus_slots::start_slot_worker( + Ok(sc_consensus_slots::start_slot_worker( config.0, select_chain, worker, @@ -333,9 +333,7 @@ pub fn start_babe(BabeParams { inherent_data_providers, babe_link.time_source, can_author_with, - ); - - Ok(slot_worker.map(|_| Ok::<(), ()>(())).compat()) + )) } struct BabeWorker { @@ -365,6 +363,9 @@ impl 
sc_consensus_slots::SimpleSlotWorker for BabeWork type EpochData = Epoch; type Claim = (BabePreDigest, AuthorityPair); type SyncOracle = SO; + type CreateProposer = Pin> + Send + 'static + >>; type Proposer = E::Proposer; type BlockImport = I; @@ -468,10 +469,10 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeWork &mut self.sync_oracle } - fn proposer(&mut self, block: &B::Header) -> Result { - self.env.init(block).map_err(|e| { + fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { + Box::pin(self.env.init(block).map_err(|e| { sp_consensus::Error::ClientImport(format!("{:?}", e)) - }) + })) } fn proposing_remaining_duration( diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 2ddb67fe4796e..880e028000b42 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -72,23 +72,24 @@ struct DummyProposer { } impl Environment for DummyFactory { + type CreateProposer = future::Ready>; type Proposer = DummyProposer; type Error = Error; fn init(&mut self, parent_header: &::Header) - -> Result + -> Self::CreateProposer { let parent_slot = crate::find_pre_digest::(parent_header) .expect("parent header has a pre-digest") .slot_number(); - Ok(DummyProposer { + future::ready(Ok(DummyProposer { factory: self.clone(), parent_hash: parent_header.hash(), parent_number: *parent_header.number(), parent_slot, - }) + })) } } @@ -419,7 +420,7 @@ fn run_one_test( babe_link: data.link.clone(), keystore, can_author_with: sp_consensus::AlwaysCanAuthor, - }).expect("Starts babe")); + }).expect("Starts babe").unit_error().compat()); } runtime.spawn(futures01::future::poll_fn(move || { @@ -428,7 +429,7 @@ fn run_one_test( })); runtime.block_on(future::join_all(import_notifications) - .map(|_| Ok::<(), ()>(())).compat()).unwrap(); + .unit_error().compat()).unwrap(); } #[test] @@ -547,7 +548,7 @@ fn propose_and_import_block( proposer_factory: &mut DummyFactory, block_import: &mut BoxBlockImport, ) -> 
sp_core::H256 { - let mut proposer = proposer_factory.init(parent).unwrap(); + let mut proposer = futures::executor::block_on(proposer_factory.init(parent)).unwrap(); let slot_number = slot_number.unwrap_or_else(|| { let parent_pre_digest = find_pre_digest::(parent).unwrap(); diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index 5c491057e576c..2ea0cbdf0f7ae 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -486,7 +486,7 @@ fn mine_loop( } let mut aux = PowAux::read(client, &best_hash)?; - let mut proposer = env.init(&best_header) + let mut proposer = futures::executor::block_on(env.init(&best_header)) .map_err(|e| Error::Environment(format!("{:?}", e)))?; let inherent_data = inherent_data_providers diff --git a/client/consensus/slots/Cargo.toml b/client/consensus/slots/Cargo.toml index 3f1a572249899..89ee14b8fa463 100644 --- a/client/consensus/slots/Cargo.toml +++ b/client/consensus/slots/Cargo.toml @@ -12,7 +12,7 @@ sc-client-api = { version = "2.0.0", path = "../../api" } sp-core = { version = "2.0.0", path = "../../../primitives/core" } sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-state-machine = { version = "2.0.0", path = "../../../primitives/state-machine" } +sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } sp-api = { version = "2.0.0", path = "../../../primitives/api" } sc-telemetry = { version = "2.0.0", path = "../../telemetry" } sp-consensus = { version = "0.8", path = "../../../primitives/consensus/common" } diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index a69c710a7e944..3aa243af72b06 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -70,6 +70,10 @@ pub trait SimpleSlotWorker { /// A handle to a `SyncOracle`. 
type SyncOracle: SyncOracle; + /// The type of future resolving to the proposer. + type CreateProposer: Future> + + Send + Unpin + 'static; + /// The type of proposer to use to build blocks. type Proposer: Proposer; @@ -129,7 +133,7 @@ pub trait SimpleSlotWorker { fn sync_oracle(&mut self) -> &mut Self::SyncOracle; /// Returns a `Proposer` to author on top of the given block. - fn proposer(&mut self, block: &B::Header) -> Result; + fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer; /// Remaining duration of the slot. fn slot_remaining_duration(&self, slot_info: &SlotInfo) -> Duration { @@ -216,32 +220,30 @@ pub trait SimpleSlotWorker { "timestamp" => timestamp, ); - let mut proposer = match self.proposer(&chain_head) { - Ok(proposer) => proposer, - Err(err) => { - warn!("Unable to author block in slot {:?}: {:?}", slot_number, err); + let awaiting_proposer = self.proposer(&chain_head).map_err(move |err| { + warn!("Unable to author block in slot {:?}: {:?}", slot_number, err); - telemetry!(CONSENSUS_WARN; "slots.unable_authoring_block"; - "slot" => slot_number, "err" => ?err - ); + telemetry!(CONSENSUS_WARN; "slots.unable_authoring_block"; + "slot" => slot_number, "err" => ?err + ); - return Box::pin(future::ready(Ok(()))); - }, - }; + err + }); let slot_remaining_duration = self.slot_remaining_duration(&slot_info); let proposing_remaining_duration = self.proposing_remaining_duration(&chain_head, &slot_info); let logs = self.pre_digest_data(slot_number, &claim); // deadline our production to approx. 
the end of the slot - let proposing = proposer.propose( + let proposing = awaiting_proposer.and_then(move |mut proposer| proposer.propose( slot_info.inherent_data, sp_runtime::generic::Digest { logs, }, slot_remaining_duration, RecordProof::No, - ).map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e))); + ).map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e)))); + let delay: Box + Unpin + Send> = match proposing_remaining_duration { Some(r) => Box::new(Delay::new(r)), None => Box::new(future::pending()), diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index d34b04a17edd3..dcc7df29ca11c 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-client-db" -version = "2.0.0" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" @@ -18,10 +18,10 @@ codec = { package = "parity-scale-codec", version = "1.0.0", features = ["derive sc-client-api = { version = "2.0.0", path = "../api" } sp-core = { version = "2.0.0", path = "../../primitives/core" } sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sc-client = { version = "2.0.0", path = "../" } -sp-state-machine = { version = "2.0.0", path = "../../primitives/state-machine" } -sc-executor = { version = "2.0.0", path = "../executor" } -sc-state-db = { version = "2.0.0", path = "../state-db" } +sc-client = { version = "0.8", path = "../" } +sp-state-machine = { version = "0.8", path = "../../primitives/state-machine" } +sc-executor = { version = "0.8", path = "../executor" } +sc-state-db = { version = "0.8", path = "../state-db" } sp-trie = { version = "2.0.0", path = "../../primitives/trie" } sp-consensus = { version = "0.8", path = "../../primitives/consensus/common" } sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } @@ -31,6 +31,8 @@ sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } substrate-test-runtime-client = { version = "2.0.0", path = 
"../../test-utils/runtime/client" } env_logger = "0.7.0" quickcheck = "0.9" +kvdb-rocksdb = "0.4" +tempdir = "0.3" [features] default = [] diff --git a/client/db/src/cache/list_cache.rs b/client/db/src/cache/list_cache.rs index 6345486f17f94..72278a1e85e6d 100644 --- a/client/db/src/cache/list_cache.rs +++ b/client/db/src/cache/list_cache.rs @@ -94,6 +94,12 @@ pub enum CommitOperation { BlockReverted(BTreeMap>>), } +/// A set of commit operations. +#[derive(Debug)] +pub struct CommitOperations { + operations: Vec>, +} + /// Single fork of list-based cache. #[derive(Debug)] #[cfg_attr(test, derive(PartialEq))] @@ -123,21 +129,17 @@ impl> ListCache storage: S, pruning_strategy: PruningStrategy>, best_finalized_block: ComplexBlockId, - ) -> Self { + ) -> ClientResult { let (best_finalized_entry, unfinalized) = storage.read_meta() - .and_then(|meta| read_forks(&storage, meta)) - .unwrap_or_else(|error| { - warn!(target: "db", "Unable to initialize list cache: {}. Restarting", error); - (None, Vec::new()) - }); + .and_then(|meta| read_forks(&storage, meta))?; - ListCache { + Ok(ListCache { storage, pruning_strategy, best_finalized_block, best_finalized_entry, unfinalized, - } + }) } /// Get reference to the storage. @@ -145,6 +147,12 @@ impl> ListCache &self.storage } + /// Get unfinalized forks reference. + #[cfg(test)] + pub fn unfinalized(&self) -> &[Fork] { + &self.unfinalized + } + /// Get value valid at block. pub fn value_at_block( &self, @@ -156,8 +164,8 @@ impl> ListCache // BUT since we're not guaranteeing to provide correct values for forks // behind the finalized block, check if the block is finalized first - if !chain::is_finalized_block(&self.storage, at, Bounded::max_value())? { - return Ok(None); + if !chain::is_finalized_block(&self.storage, &at, Bounded::max_value())? 
{ + return Err(ClientError::NotInFinalizedChain); } self.best_finalized_entry.as_ref() @@ -171,11 +179,14 @@ impl> ListCache // IF there's no matching fork, ensure that this isn't a block from a fork that has forked // behind the best finalized block and search at finalized fork - match self.find_unfinalized_fork(at)? { + match self.find_unfinalized_fork(&at)? { Some(fork) => Some(&fork.head), None => match self.best_finalized_entry.as_ref() { - Some(best_finalized_entry) if chain::is_connected_to_block(&self.storage, &best_finalized_entry.valid_from, at)? => - Some(best_finalized_entry), + Some(best_finalized_entry) if chain::is_connected_to_block( + &self.storage, + &at, + &best_finalized_entry.valid_from, + )? => Some(best_finalized_entry), _ => None, }, } @@ -198,9 +209,98 @@ impl> ListCache block: ComplexBlockId, value: Option, entry_type: EntryType, + operations: &mut CommitOperations, + ) -> ClientResult<()> { + Ok(operations.append(self.do_on_block_insert(tx, parent, block, value, entry_type, operations)?)) + } + + /// When previously inserted block is finalized. + pub fn on_block_finalize>( + &self, + tx: &mut Tx, + parent: ComplexBlockId, + block: ComplexBlockId, + operations: &mut CommitOperations, + ) -> ClientResult<()> { + Ok(operations.append(self.do_on_block_finalize(tx, parent, block, operations)?)) + } + + /// When block is reverted. + pub fn on_block_revert>( + &self, + tx: &mut Tx, + reverted_block: &ComplexBlockId, + operations: &mut CommitOperations, + ) -> ClientResult<()> { + Ok(operations.append(Some(self.do_on_block_revert(tx, reverted_block)?))) + } + + /// When transaction is committed. 
+ pub fn on_transaction_commit(&mut self, ops: CommitOperations) { + for op in ops.operations { + match op { + CommitOperation::AppendNewBlock(index, best_block) => { + let mut fork = self.unfinalized.get_mut(index) + .expect("ListCache is a crate-private type; + internal clients of ListCache are committing transaction while cache is locked; + CommitOperation holds valid references while cache is locked; qed"); + fork.best_block = Some(best_block); + }, + CommitOperation::AppendNewEntry(index, entry) => { + let mut fork = self.unfinalized.get_mut(index) + .expect("ListCache is a crate-private type; + internal clients of ListCache are committing transaction while cache is locked; + CommitOperation holds valid references while cache is locked; qed"); + fork.best_block = Some(entry.valid_from.clone()); + fork.head = entry; + }, + CommitOperation::AddNewFork(entry) => { + self.unfinalized.push(Fork { + best_block: Some(entry.valid_from.clone()), + head: entry, + }); + }, + CommitOperation::BlockFinalized(block, finalizing_entry, forks) => { + self.best_finalized_block = block; + if let Some(finalizing_entry) = finalizing_entry { + self.best_finalized_entry = Some(finalizing_entry); + } + for fork_index in forks.iter().rev() { + self.unfinalized.remove(*fork_index); + } + }, + CommitOperation::BlockReverted(forks) => { + for (fork_index, updated_fork) in forks.into_iter().rev() { + match updated_fork { + Some(updated_fork) => self.unfinalized[fork_index] = updated_fork, + None => { self.unfinalized.remove(fork_index); }, + } + } + }, + } + } + } + + fn do_on_block_insert>( + &self, + tx: &mut Tx, + parent: ComplexBlockId, + block: ComplexBlockId, + value: Option, + entry_type: EntryType, + operations: &CommitOperations, ) -> ClientResult>> { // this guarantee is currently provided by LightStorage && we're relying on it here - debug_assert!(entry_type != EntryType::Final || self.best_finalized_block.hash == parent.hash); + let prev_operation = 
operations.operations.last(); + debug_assert!( + entry_type != EntryType::Final || + self.best_finalized_block.hash == parent.hash || + match prev_operation { + Some(&CommitOperation::BlockFinalized(ref best_finalized_block, _, _)) + => best_finalized_block.hash == parent.hash, + _ => false, + } + ); // we do not store any values behind finalized if block.number != Zero::zero() && self.best_finalized_block.number >= block.number { @@ -296,7 +396,7 @@ impl> ListCache } // cleanup database from abandoned unfinalized forks and obsolete finalized entries - let abandoned_forks = self.destroy_abandoned_forks(tx, &block); + let abandoned_forks = self.destroy_abandoned_forks(tx, &block, prev_operation); self.prune_finalized_entries(tx, &block); match new_storage_entry { @@ -310,34 +410,39 @@ impl> ListCache } } - /// When previously inserted block is finalized. - pub fn on_block_finalize>( + fn do_on_block_finalize>( &self, tx: &mut Tx, parent: ComplexBlockId, block: ComplexBlockId, + operations: &CommitOperations, ) -> ClientResult>> { - // this guarantee is currently provided by LightStorage && we're relying on it here - debug_assert_eq!(self.best_finalized_block.hash, parent.hash); + // this guarantee is currently provided by db backend && we're relying on it here + let prev_operation = operations.operations.last(); + debug_assert!( + self.best_finalized_block.hash == parent.hash || + match prev_operation { + Some(&CommitOperation::BlockFinalized(ref best_finalized_block, _, _)) + => best_finalized_block.hash == parent.hash, + _ => false, + } + ); // there could be at most one entry that is finalizing let finalizing_entry = self.storage.read_entry(&block)? 
.map(|entry| entry.into_entry(block.clone())); // cleanup database from abandoned unfinalized forks and obsolete finalized entries - let abandoned_forks = self.destroy_abandoned_forks(tx, &block); + let abandoned_forks = self.destroy_abandoned_forks(tx, &block, prev_operation); self.prune_finalized_entries(tx, &block); - let update_meta = finalizing_entry.is_some(); let operation = CommitOperation::BlockFinalized(block, finalizing_entry, abandoned_forks); - if update_meta { - tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation); - } + tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation); + Ok(Some(operation)) } - /// When block is reverted. - pub fn on_block_revert>( + fn do_on_block_revert>( &self, tx: &mut Tx, reverted_block: &ComplexBlockId, @@ -374,50 +479,6 @@ impl> ListCache Ok(operation) } - /// When transaction is committed. - pub fn on_transaction_commit(&mut self, op: CommitOperation) { - match op { - CommitOperation::AppendNewBlock(index, best_block) => { - let mut fork = self.unfinalized.get_mut(index) - .expect("ListCache is a crate-private type; - internal clients of ListCache are committing transaction while cache is locked; - CommitOperation holds valid references while cache is locked; qed"); - fork.best_block = Some(best_block); - }, - CommitOperation::AppendNewEntry(index, entry) => { - let mut fork = self.unfinalized.get_mut(index) - .expect("ListCache is a crate-private type; - internal clients of ListCache are committing transaction while cache is locked; - CommitOperation holds valid references while cache is locked; qed"); - fork.best_block = Some(entry.valid_from.clone()); - fork.head = entry; - }, - CommitOperation::AddNewFork(entry) => { - self.unfinalized.push(Fork { - best_block: Some(entry.valid_from.clone()), - head: entry, - }); - }, - CommitOperation::BlockFinalized(block, finalizing_entry, forks) => { - self.best_finalized_block = block; - if let Some(finalizing_entry) = 
finalizing_entry { - self.best_finalized_entry = Some(finalizing_entry); - } - for fork_index in forks.iter().rev() { - self.unfinalized.remove(*fork_index); - } - }, - CommitOperation::BlockReverted(forks) => { - for (fork_index, updated_fork) in forks.into_iter().rev() { - match updated_fork { - Some(updated_fork) => self.unfinalized[fork_index] = updated_fork, - None => { self.unfinalized.remove(fork_index); }, - } - } - }, - } - } - /// Prune old finalized entries. fn prune_finalized_entries>( &self, @@ -475,10 +536,22 @@ impl> ListCache fn destroy_abandoned_forks>( &self, tx: &mut Tx, - block: &ComplexBlockId + block: &ComplexBlockId, + prev_operation: Option<&CommitOperation>, ) -> BTreeSet { - let mut destroyed = BTreeSet::new(); - for (index, fork) in self.unfinalized.iter().enumerate() { + // if some block has been finalized already => take it into account + let prev_abandoned_forks = match prev_operation { + Some(&CommitOperation::BlockFinalized(_, _, ref abandoned_forks)) => Some(abandoned_forks), + _ => None, + }; + + let mut destroyed = prev_abandoned_forks.cloned().unwrap_or_else(|| BTreeSet::new()); + let live_unfinalized = self.unfinalized.iter() + .enumerate() + .filter(|(idx, _)| prev_abandoned_forks + .map(|prev_abandoned_forks| !prev_abandoned_forks.contains(idx)) + .unwrap_or(true)); + for (index, fork) in live_unfinalized { if fork.head.valid_from.number == block.number { destroyed.insert(index); if fork.head.valid_from.hash != block.hash { @@ -493,7 +566,10 @@ impl> ListCache } /// Search unfinalized fork where given block belongs. - fn find_unfinalized_fork(&self, block: &ComplexBlockId) -> ClientResult>> { + fn find_unfinalized_fork( + &self, + block: &ComplexBlockId, + ) -> ClientResult>> { for unfinalized in &self.unfinalized { if unfinalized.matches(&self.storage, block)? 
{ return Ok(Some(&unfinalized)); @@ -549,7 +625,7 @@ impl Fork { }; // check if the parent is connected to the beginning of the range - if !chain::is_connected_to_block(storage, &parent, &begin)? { + if !chain::is_connected_to_block(storage, parent, &begin)? { return Ok(None); } @@ -621,6 +697,65 @@ impl Fork { } } +impl Default for CommitOperations { + fn default() -> Self { + CommitOperations { operations: Vec::new() } + } +} + +// This should never be allowed for non-test code to avoid revealing its internals. +#[cfg(test)] +impl From>> for CommitOperations { + fn from(operations: Vec>) -> Self { + CommitOperations { operations } + } +} + +impl CommitOperations { + /// Append operation to the set. + fn append(&mut self, new_operation: Option>) { + let new_operation = match new_operation { + Some(new_operation) => new_operation, + None => return, + }; + + let last_operation = match self.operations.pop() { + Some(last_operation) => last_operation, + None => { + self.operations.push(new_operation); + return; + }, + }; + + // we are able (and obliged to) to merge two consequent block finalization operations + match last_operation { + CommitOperation::BlockFinalized(old_finalized_block, old_finalized_entry, old_abandoned_forks) => { + match new_operation { + CommitOperation::BlockFinalized(new_finalized_block, new_finalized_entry, new_abandoned_forks) => { + self.operations.push(CommitOperation::BlockFinalized( + new_finalized_block, + new_finalized_entry, + new_abandoned_forks, + )); + }, + _ => { + self.operations.push(CommitOperation::BlockFinalized( + old_finalized_block, + old_finalized_entry, + old_abandoned_forks, + )); + self.operations.push(new_operation); + }, + } + }, + _ => { + self.operations.push(last_operation); + self.operations.push(new_operation); + }, + } + } +} + /// Destroy fork by deleting all unfinalized entries. 
pub fn destroy_fork, Tx: StorageTransaction>( head_valid_from: ComplexBlockId, @@ -674,7 +809,7 @@ mod chain { block1: &ComplexBlockId, block2: &ComplexBlockId, ) -> ClientResult { - let (begin, end) = if block1 > block2 { (block2, block1) } else { (block1, block2) }; + let (begin, end) = if *block1 > *block2 { (block2, block1) } else { (block1, block2) }; let mut current = storage.read_header(&end.hash)? .ok_or_else(|| ClientError::UnknownBlock(format!("{}", end.hash)))?; while *current.number() > begin.number { @@ -773,8 +908,8 @@ pub mod tests { // when block is earlier than best finalized block AND it is not finalized // --- 50 --- // ----------> [100] - assert_eq!(ListCache::<_, u64, _>::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)) - .value_at_block(&test_id(50)).unwrap(), None); + assert!(ListCache::<_, u64, _>::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)) + .unwrap().value_at_block(&test_id(50)).is_err()); // when block is earlier than best finalized block AND it is finalized AND value is some // [30] ---- 50 ---> [100] assert_eq!(ListCache::new( @@ -784,7 +919,7 @@ pub mod tests { .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), PruningStrategy::ByDepth(1024), test_id(100) - ).value_at_block(&test_id(50)).unwrap(), Some((test_id(30), Some(test_id(100)), 30))); + ).unwrap().value_at_block(&test_id(50)).unwrap(), Some((test_id(30), Some(test_id(100)), 30))); // when block is the best finalized block AND value is some // ---> [100] assert_eq!(ListCache::new( @@ -794,18 +929,18 @@ pub mod tests { .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), PruningStrategy::ByDepth(1024), test_id(100) - ).value_at_block(&test_id(100)).unwrap(), Some((test_id(100), None, 100))); + 
).unwrap().value_at_block(&test_id(100)).unwrap(), Some((test_id(100), None, 100))); // when block is parallel to the best finalized block // ---- 100 // ---> [100] - assert_eq!(ListCache::new( + assert!(ListCache::new( DummyStorage::new() .with_meta(Some(test_id(100)), Vec::new()) .with_id(50, H256::from_low_u64_be(50)) .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), PruningStrategy::ByDepth(1024), test_id(100) - ).value_at_block(&ComplexBlockId::new(H256::from_low_u64_be(2), 100)).unwrap(), None); + ).unwrap().value_at_block(&ComplexBlockId::new(H256::from_low_u64_be(2), 100)).is_err()); // when block is later than last finalized block AND there are no forks AND finalized value is Some // ---> [100] --- 200 @@ -815,7 +950,7 @@ pub mod tests { .with_id(50, H256::from_low_u64_be(50)) .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }), PruningStrategy::ByDepth(1024), test_id(100) - ).value_at_block(&test_id(200)).unwrap(), Some((test_id(100), None, 100))); + ).unwrap().value_at_block(&test_id(200)).unwrap(), Some((test_id(100), None, 100))); // when block is later than last finalized block AND there are no matching forks // AND block is connected to finalized block AND finalized value is Some @@ -831,7 +966,7 @@ pub mod tests { .with_header(test_header(4)) .with_header(fork_header(0, 2, 3)), PruningStrategy::ByDepth(1024), test_id(2) - ).value_at_block(&fork_id(0, 2, 3)).unwrap(), Some((correct_id(2), None, 2))); + ).unwrap().value_at_block(&fork_id(0, 2, 3)).unwrap(), Some((correct_id(2), None, 2))); // when block is later than last finalized block AND there are no matching forks // AND block is not connected to finalized block // --- 2 --- 3 @@ -848,7 +983,7 @@ pub mod tests { .with_header(fork_header(0, 1, 3)) .with_header(fork_header(0, 1, 2)), PruningStrategy::ByDepth(1024), test_id(2) - 
).value_at_block(&fork_id(0, 1, 3)).unwrap(), None); + ).unwrap().value_at_block(&fork_id(0, 1, 3)).unwrap(), None); // when block is later than last finalized block AND it appends to unfinalized fork from the end // AND unfinalized value is Some @@ -861,7 +996,7 @@ pub mod tests { .with_header(test_header(4)) .with_header(test_header(5)), PruningStrategy::ByDepth(1024), test_id(2) - ).value_at_block(&correct_id(5)).unwrap(), Some((correct_id(4), None, 4))); + ).unwrap().value_at_block(&correct_id(5)).unwrap(), Some((correct_id(4), None, 4))); // when block is later than last finalized block AND it does not fits unfinalized fork // AND it is connected to the finalized block AND finalized value is Some // ---> [2] ----------> [4] @@ -876,7 +1011,7 @@ pub mod tests { .with_header(test_header(4)) .with_header(fork_header(0, 2, 3)), PruningStrategy::ByDepth(1024), test_id(2) - ).value_at_block(&fork_id(0, 2, 3)).unwrap(), Some((correct_id(2), None, 2))); + ).unwrap().value_at_block(&fork_id(0, 2, 3)).unwrap(), Some((correct_id(2), None, 2))); } #[test] @@ -885,22 +1020,25 @@ pub mod tests { let fin = EntryType::Final; // when trying to insert block < finalized number - assert!(ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)) - .on_block_insert( + let mut ops = Default::default(); + assert!(ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)).unwrap() + .do_on_block_insert( &mut DummyTransaction::new(), test_id(49), test_id(50), Some(50), nfin, + &mut ops, ).unwrap().is_none()); // when trying to insert block @ finalized number - assert!(ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)) - .on_block_insert( + assert!(ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)).unwrap() + .do_on_block_insert( &mut DummyTransaction::new(), test_id(99), test_id(100), Some(100), nfin, + &Default::default(), ).unwrap().is_none()); // when trying to insert 
non-final block AND it appends to the best block of unfinalized fork @@ -910,19 +1048,23 @@ pub mod tests { .with_meta(None, vec![test_id(4)]) .with_entry(test_id(4), StorageEntry { prev_valid_from: None, value: 4 }), PruningStrategy::ByDepth(1024), test_id(2) - ); + ).unwrap(); cache.unfinalized[0].best_block = Some(test_id(4)); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, test_id(4), test_id(5), Some(4), nfin).unwrap(), - Some(CommitOperation::AppendNewBlock(0, test_id(5)))); + assert_eq!( + cache.do_on_block_insert(&mut tx, test_id(4), test_id(5), Some(4), nfin, &Default::default()).unwrap(), + Some(CommitOperation::AppendNewBlock(0, test_id(5))), + ); assert!(tx.inserted_entries().is_empty()); assert!(tx.removed_entries().is_empty()); assert!(tx.updated_meta().is_none()); // when trying to insert non-final block AND it appends to the best block of unfinalized fork // AND new value is the same as in the fork' best block let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, test_id(4), test_id(5), Some(5), nfin).unwrap(), - Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: test_id(5), value: 5 }))); + assert_eq!( + cache.do_on_block_insert(&mut tx, test_id(4), test_id(5), Some(5), nfin, &Default::default()).unwrap(), + Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: test_id(5), value: 5 })), + ); assert_eq!(*tx.inserted_entries(), vec![test_id(5).hash].into_iter().collect()); assert!(tx.removed_entries().is_empty()); assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: None, unfinalized: vec![test_id(5)] })); @@ -935,18 +1077,36 @@ pub mod tests { .with_entry(correct_id(4), StorageEntry { prev_valid_from: None, value: 4 }) .with_header(test_header(4)), PruningStrategy::ByDepth(1024), test_id(2) - ); + ).unwrap(); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(4), nfin).unwrap(), - 
Some(CommitOperation::AppendNewBlock(0, correct_id(5)))); + assert_eq!( + cache.do_on_block_insert( + &mut tx, + correct_id(4), + correct_id(5), + Some(4), + nfin, + &Default::default(), + ).unwrap(), + Some(CommitOperation::AppendNewBlock(0, correct_id(5))), + ); assert!(tx.inserted_entries().is_empty()); assert!(tx.removed_entries().is_empty()); assert!(tx.updated_meta().is_none()); // when trying to insert non-final block AND it is the first block that appends to the best block of unfinalized fork // AND new value is the same as in the fork' best block let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(5), nfin).unwrap(), - Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(5), value: 5 }))); + assert_eq!( + cache.do_on_block_insert( + &mut tx, + correct_id(4), + correct_id(5), + Some(5), + nfin, + &Default::default(), + ).unwrap(), + Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(5), value: 5 })), + ); assert_eq!(*tx.inserted_entries(), vec![correct_id(5).hash].into_iter().collect()); assert!(tx.removed_entries().is_empty()); assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: None, unfinalized: vec![correct_id(5)] })); @@ -961,10 +1121,13 @@ pub mod tests { .with_header(test_header(3)) .with_header(test_header(4)), PruningStrategy::ByDepth(1024), correct_id(2) - ); + ).unwrap(); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(3), fork_id(0, 3, 4), Some(14), nfin).unwrap(), - Some(CommitOperation::AddNewFork(Entry { valid_from: fork_id(0, 3, 4), value: 14 }))); + assert_eq!( + cache.do_on_block_insert(&mut tx, correct_id(3), fork_id(0, 3, 4), Some(14), nfin, &Default::default()) + .unwrap(), + Some(CommitOperation::AddNewFork(Entry { valid_from: fork_id(0, 3, 4), value: 14 })), + ); assert_eq!(*tx.inserted_entries(), vec![fork_id(0, 3, 4).hash].into_iter().collect()); 
assert!(tx.removed_entries().is_empty()); assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(4), fork_id(0, 3, 4)] })); @@ -976,9 +1139,13 @@ pub mod tests { .with_meta(Some(correct_id(2)), vec![]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }), PruningStrategy::ByDepth(1024), correct_id(2) - ); + ).unwrap(); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), nfin).unwrap(), None); + assert_eq!( + cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), nfin, &Default::default()) + .unwrap(), + None, + ); assert!(tx.inserted_entries().is_empty()); assert!(tx.removed_entries().is_empty()); assert!(tx.updated_meta().is_none()); @@ -989,19 +1156,23 @@ pub mod tests { .with_meta(Some(correct_id(2)), vec![]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }), PruningStrategy::ByDepth(1024), correct_id(2) - ); + ).unwrap(); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), nfin).unwrap(), - Some(CommitOperation::AddNewFork(Entry { valid_from: correct_id(3), value: 3 }))); + assert_eq!( + cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), nfin, &Default::default()) + .unwrap(), + Some(CommitOperation::AddNewFork(Entry { valid_from: correct_id(3), value: 3 })), + ); assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect()); assert!(tx.removed_entries().is_empty()); assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(3)] })); // when inserting finalized entry AND there are no previous finalized entries - let cache = ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), correct_id(2)); + let cache = ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), correct_id(2)).unwrap(); let 
mut tx = DummyTransaction::new(); assert_eq!( - cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin).unwrap(), + cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin, &Default::default()) + .unwrap(), Some(CommitOperation::BlockFinalized( correct_id(3), Some(Entry { valid_from: correct_id(3), value: 3 }), @@ -1017,17 +1188,19 @@ pub mod tests { .with_meta(Some(correct_id(2)), vec![]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }), PruningStrategy::ByDepth(1024), correct_id(2) - ); + ).unwrap(); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), fin).unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default()))); + assert_eq!( + cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), fin, &Default::default()).unwrap(), + Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default())), + ); assert!(tx.inserted_entries().is_empty()); assert!(tx.removed_entries().is_empty()); assert!(tx.updated_meta().is_none()); // when inserting finalized entry AND value differs from previous finalized let mut tx = DummyTransaction::new(); assert_eq!( - cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin).unwrap(), + cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin, &Default::default()).unwrap(), Some(CommitOperation::BlockFinalized( correct_id(3), Some(Entry { valid_from: correct_id(3), value: 3 }), @@ -1045,10 +1218,12 @@ pub mod tests { .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: None, value: 13 }), PruningStrategy::ByDepth(1024), correct_id(2) - ); + ).unwrap(); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), fin).unwrap(), - 
Some(CommitOperation::BlockFinalized(correct_id(3), None, vec![0].into_iter().collect()))); + assert_eq!( + cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), fin, &Default::default()).unwrap(), + Some(CommitOperation::BlockFinalized(correct_id(3), None, vec![0].into_iter().collect())), + ); } #[test] @@ -1060,13 +1235,18 @@ pub mod tests { .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }), PruningStrategy::ByDepth(1024), correct_id(2) - ); + ).unwrap(); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_finalize(&mut tx, correct_id(2), correct_id(3)).unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default()))); + assert_eq!( + cache.do_on_block_finalize(&mut tx, correct_id(2), correct_id(3), &Default::default()).unwrap(), + Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default())), + ); assert!(tx.inserted_entries().is_empty()); assert!(tx.removed_entries().is_empty()); - assert!(tx.updated_meta().is_none()); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(5)] }), + ); // finalization finalizes entry let cache = ListCache::new( DummyStorage::new() @@ -1074,10 +1254,10 @@ pub mod tests { .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }), PruningStrategy::ByDepth(1024), correct_id(4) - ); + ).unwrap(); let mut tx = DummyTransaction::new(); assert_eq!( - cache.on_block_finalize(&mut tx, correct_id(4), correct_id(5)).unwrap(), + cache.do_on_block_finalize(&mut tx, correct_id(4), correct_id(5), &Default::default()).unwrap(), Some(CommitOperation::BlockFinalized( correct_id(5), Some(Entry { valid_from: correct_id(5), value: 5 }), @@ -1094,10 +1274,12 @@ pub mod tests { 
.with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: None, value: 13 }), PruningStrategy::ByDepth(1024), correct_id(2) - ); + ).unwrap(); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_finalize(&mut tx, correct_id(2), correct_id(3)).unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(3), None, vec![0].into_iter().collect()))); + assert_eq!( + cache.do_on_block_finalize(&mut tx, correct_id(2), correct_id(3), &Default::default()).unwrap(), + Some(CommitOperation::BlockFinalized(correct_id(3), None, vec![0].into_iter().collect())), + ); } #[test] @@ -1109,25 +1291,29 @@ pub mod tests { .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }) .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(5)), value: 6 }), PruningStrategy::ByDepth(1024), correct_id(2) - ); + ).unwrap(); // when new block is appended to unfinalized fork - cache.on_transaction_commit(CommitOperation::AppendNewBlock(0, correct_id(6))); + cache.on_transaction_commit(vec![CommitOperation::AppendNewBlock(0, correct_id(6))].into()); assert_eq!(cache.unfinalized[0].best_block, Some(correct_id(6))); // when new entry is appended to unfinalized fork - cache.on_transaction_commit(CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(7), value: 7 })); + cache.on_transaction_commit(vec![ + CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(7), value: 7 }), + ].into()); assert_eq!(cache.unfinalized[0].best_block, Some(correct_id(7))); assert_eq!(cache.unfinalized[0].head, Entry { valid_from: correct_id(7), value: 7 }); // when new fork is added - cache.on_transaction_commit(CommitOperation::AddNewFork(Entry { valid_from: correct_id(10), value: 10 })); + cache.on_transaction_commit(vec![ + CommitOperation::AddNewFork(Entry { valid_from: correct_id(10), value: 10 }), + ].into()); 
assert_eq!(cache.unfinalized[2].best_block, Some(correct_id(10))); assert_eq!(cache.unfinalized[2].head, Entry { valid_from: correct_id(10), value: 10 }); // when block is finalized + entry is finalized + unfinalized forks are deleted - cache.on_transaction_commit(CommitOperation::BlockFinalized( + cache.on_transaction_commit(vec![CommitOperation::BlockFinalized( correct_id(20), Some(Entry { valid_from: correct_id(20), value: 20 }), vec![0, 1, 2].into_iter().collect(), - )); + )].into()); assert_eq!(cache.best_finalized_block, correct_id(20)); assert_eq!(cache.best_finalized_entry, Some(Entry { valid_from: correct_id(20), value: 20 })); assert!(cache.unfinalized.is_empty()); @@ -1148,7 +1334,7 @@ pub mod tests { .with_header(test_header(4)) .with_header(test_header(5)), PruningStrategy::ByDepth(1024), correct_id(0) - ).find_unfinalized_fork(&correct_id(4)).unwrap().unwrap().head.valid_from, correct_id(5)); + ).unwrap().find_unfinalized_fork((&correct_id(4)).into()).unwrap().unwrap().head.valid_from, correct_id(5)); // --- [2] ---------------> [5] // ----------> [3] ---> 4 assert_eq!(ListCache::new( @@ -1165,7 +1351,8 @@ pub mod tests { .with_header(fork_header(0, 1, 3)) .with_header(fork_header(0, 1, 4)), PruningStrategy::ByDepth(1024), correct_id(0) - ).find_unfinalized_fork(&fork_id(0, 1, 4)).unwrap().unwrap().head.valid_from, fork_id(0, 1, 3)); + ).unwrap() + .find_unfinalized_fork((&fork_id(0, 1, 4)).into()).unwrap().unwrap().head.valid_from, fork_id(0, 1, 3)); // --- [2] ---------------> [5] // ----------> [3] // -----------------> 4 @@ -1185,7 +1372,7 @@ pub mod tests { .with_header(fork_header(1, 1, 3)) .with_header(fork_header(1, 1, 4)), PruningStrategy::ByDepth(1024), correct_id(0) - ).find_unfinalized_fork(&fork_id(1, 1, 4)).unwrap().is_none()); + ).unwrap().find_unfinalized_fork((&fork_id(1, 1, 4)).into()).unwrap().is_none()); } #[test] @@ -1195,7 +1382,7 @@ pub mod tests { .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), 
value: 100 }) .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }); assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .matches(&storage, &test_id(20)).unwrap(), false); + .matches(&storage, (&test_id(20)).into()).unwrap(), false); // when block is not connected to the begin block let storage = DummyStorage::new() .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) @@ -1206,7 +1393,7 @@ pub mod tests { .with_header(fork_header(0, 2, 4)) .with_header(fork_header(0, 2, 3)); assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } - .matches(&storage, &fork_id(0, 2, 4)).unwrap(), false); + .matches(&storage, (&fork_id(0, 2, 4)).into()).unwrap(), false); // when block is not connected to the end block let storage = DummyStorage::new() .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) @@ -1216,14 +1403,14 @@ pub mod tests { .with_header(test_header(3)) .with_header(fork_header(0, 3, 4)); assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } - .matches(&storage, &fork_id(0, 3, 4)).unwrap(), false); + .matches(&storage, (&fork_id(0, 3, 4)).into()).unwrap(), false); // when block is connected to the begin block AND end is open let storage = DummyStorage::new() .with_entry(correct_id(5), StorageEntry { prev_valid_from: None, value: 100 }) .with_header(test_header(5)) .with_header(test_header(6)); assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } - .matches(&storage, &correct_id(6)).unwrap(), true); + .matches(&storage, (&correct_id(6)).into()).unwrap(), true); // when block is connected to the begin block AND to the end block let storage = DummyStorage::new() .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) @@ -1232,7 +1419,7 @@ 
pub mod tests { .with_header(test_header(4)) .with_header(test_header(3)); assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } - .matches(&storage, &correct_id(4)).unwrap(), true); + .matches(&storage, (&correct_id(4)).into()).unwrap(), true); } #[test] @@ -1338,9 +1525,21 @@ pub mod tests { #[test] fn is_connected_to_block_fails() { // when storage returns error - assert!(chain::is_connected_to_block::<_, u64, _>(&FaultyStorage, &test_id(1), &test_id(100)).is_err()); + assert!( + chain::is_connected_to_block::<_, u64, _>( + &FaultyStorage, + (&test_id(1)).into(), + &test_id(100), + ).is_err(), + ); // when there's no header in the storage - assert!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new(), &test_id(1), &test_id(100)).is_err()); + assert!( + chain::is_connected_to_block::<_, u64, _>( + &DummyStorage::new(), + (&test_id(1)).into(), + &test_id(100), + ).is_err(), + ); } #[test] @@ -1348,35 +1547,35 @@ pub mod tests { // when without iterations we end up with different block assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() .with_header(test_header(1)), - &test_id(1), &correct_id(1)).unwrap(), false); + (&test_id(1)).into(), &correct_id(1)).unwrap(), false); // when with ASC iterations we end up with different block assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() .with_header(test_header(0)) .with_header(test_header(1)) .with_header(test_header(2)), - &test_id(0), &correct_id(2)).unwrap(), false); + (&test_id(0)).into(), &correct_id(2)).unwrap(), false); // when with DESC iterations we end up with different block assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() .with_header(test_header(0)) .with_header(test_header(1)) .with_header(test_header(2)), - &correct_id(2), &test_id(0)).unwrap(), false); + (&correct_id(2)).into(), &test_id(0)).unwrap(), false); // when without iterations we end up with the same block 
assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() .with_header(test_header(1)), - &correct_id(1), &correct_id(1)).unwrap(), true); + (&correct_id(1)).into(), &correct_id(1)).unwrap(), true); // when with ASC iterations we end up with the same block assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() .with_header(test_header(0)) .with_header(test_header(1)) .with_header(test_header(2)), - &correct_id(0), &correct_id(2)).unwrap(), true); + (&correct_id(0)).into(), &correct_id(2)).unwrap(), true); // when with DESC iterations we end up with the same block assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() .with_header(test_header(0)) .with_header(test_header(1)) .with_header(test_header(2)), - &correct_id(2), &correct_id(0)).unwrap(), true); + (&correct_id(2)).into(), &correct_id(0)).unwrap(), true); } #[test] @@ -1454,7 +1653,7 @@ pub mod tests { .with_entry(test_id(10), StorageEntry { prev_valid_from: None, value: 10 }) .with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(10)), value: 20 }) .with_entry(test_id(30), StorageEntry { prev_valid_from: Some(test_id(20)), value: 30 }), - strategy, test_id(9)); + strategy, test_id(9)).unwrap(); let mut tx = DummyTransaction::new(); // when finalizing entry #10: no entries pruned @@ -1515,28 +1714,64 @@ pub mod tests { .with_header(fork_header(1, 2, 5)) .with_header(fork_header(2, 4, 5)), PruningStrategy::ByDepth(1024), correct_id(1) - ); + ).unwrap(); // when 5 is reverted: entry 5 is truncated - let op = cache.on_block_revert(&mut DummyTransaction::new(), &correct_id(5)).unwrap(); + let op = cache.do_on_block_revert(&mut DummyTransaction::new(), &correct_id(5)).unwrap(); assert_eq!(op, CommitOperation::BlockReverted(vec![ (0, Some(Fork { best_block: None, head: Entry { valid_from: correct_id(4), value: 4 } })), ].into_iter().collect())); - cache.on_transaction_commit(op); + cache.on_transaction_commit(vec![op].into()); // when 3 is 
reverted: entries 4+5' are truncated - let op = cache.on_block_revert(&mut DummyTransaction::new(), &correct_id(3)).unwrap(); + let op = cache.do_on_block_revert(&mut DummyTransaction::new(), &correct_id(3)).unwrap(); assert_eq!(op, CommitOperation::BlockReverted(vec![ (0, None), (2, None), ].into_iter().collect())); - cache.on_transaction_commit(op); + cache.on_transaction_commit(vec![op].into()); // when 2 is reverted: entries 4'+5' are truncated - let op = cache.on_block_revert(&mut DummyTransaction::new(), &correct_id(2)).unwrap(); + let op = cache.do_on_block_revert(&mut DummyTransaction::new(), &correct_id(2)).unwrap(); assert_eq!(op, CommitOperation::BlockReverted(vec![ (0, None), ].into_iter().collect())); - cache.on_transaction_commit(op); + cache.on_transaction_commit(vec![op].into()); + } + + #[test] + fn append_commit_operation_works() { + let mut ops = CommitOperations::default(); + ops.append(None); + assert_eq!(ops.operations, Vec::new()); + + ops.append(Some(CommitOperation::BlockFinalized( + test_id(10), + Some(Entry { valid_from: test_id(10), value: 10 }), + vec![5].into_iter().collect(), + ))); + assert_eq!( + ops.operations, + vec![CommitOperation::BlockFinalized( + test_id(10), + Some(Entry { valid_from: test_id(10), value: 10 }), + vec![5].into_iter().collect(), + )], + ); + + ops.append(Some(CommitOperation::BlockFinalized( + test_id(20), + Some(Entry { valid_from: test_id(20), value: 20 }), + vec![5, 6].into_iter().collect(), + ))); + + assert_eq!( + ops.operations, + vec![CommitOperation::BlockFinalized( + test_id(20), + Some(Entry { valid_from: test_id(20), value: 20 }), + vec![5, 6].into_iter().collect(), + )], + ); } } diff --git a/client/db/src/cache/list_storage.rs b/client/db/src/cache/list_storage.rs index 9cd3b1049a4f8..606090ee1401d 100644 --- a/client/db/src/cache/list_storage.rs +++ b/client/db/src/cache/list_storage.rs @@ -222,7 +222,9 @@ mod meta { unfinalized.push(&entry.valid_from); }, CommitOperation::BlockFinalized(_, ref 
finalizing_entry, ref forks) => { - finalized = finalizing_entry.as_ref().map(|entry| &entry.valid_from); + if let Some(finalizing_entry) = finalizing_entry.as_ref() { + finalized = Some(&finalizing_entry.valid_from); + } for fork_index in forks.iter().rev() { unfinalized.remove(*fork_index); } diff --git a/client/db/src/cache/mod.rs b/client/db/src/cache/mod.rs index bef8d9a7919d3..8fd1adc094ae4 100644 --- a/client/db/src/cache/mod.rs +++ b/client/db/src/cache/mod.rs @@ -16,7 +16,7 @@ //! DB-backed cache of blockchain data. -use std::{sync::Arc, collections::HashMap}; +use std::{sync::Arc, collections::{HashMap, hash_map::Entry}}; use parking_lot::RwLock; use kvdb::{KeyValueDB, DBTransaction}; @@ -51,8 +51,10 @@ pub enum EntryType { /// Block identifier that holds both hash and number. #[derive(Clone, Debug, Encode, Decode, PartialEq)] pub struct ComplexBlockId { - hash: Block::Hash, - number: NumberFor, + /// Hash of the block. + pub(crate) hash: Block::Hash, + /// Number of the block. + pub(crate) number: NumberFor, } impl ComplexBlockId { @@ -79,7 +81,7 @@ pub struct DbCache { db: Arc, key_lookup_column: u32, header_column: u32, - authorities_column: u32, + cache_column: u32, genesis_hash: Block::Hash, best_finalized_block: ComplexBlockId, } @@ -90,7 +92,7 @@ impl DbCache { db: Arc, key_lookup_column: u32, header_column: u32, - authorities_column: u32, + cache_column: u32, genesis_hash: Block::Hash, best_finalized_block: ComplexBlockId, ) -> Self { @@ -99,7 +101,7 @@ impl DbCache { db, key_lookup_column, header_column, - authorities_column, + cache_column, genesis_hash, best_finalized_block, } @@ -115,30 +117,48 @@ impl DbCache { DbCacheTransaction { cache: self, tx, - cache_at_op: HashMap::new(), + cache_at_ops: HashMap::new(), best_finalized_block: None, } } + /// Begin cache transaction with given ops. 
+ pub fn transaction_with_ops<'a>( + &'a mut self, + tx: &'a mut DBTransaction, + ops: DbCacheTransactionOps, + ) -> DbCacheTransaction<'a, Block> { + DbCacheTransaction { + cache: self, + tx, + cache_at_ops: ops.cache_at_ops, + best_finalized_block: ops.best_finalized_block, + } + } + /// Run post-commit cache operations. - pub fn commit(&mut self, ops: DbCacheTransactionOps) { - for (name, op) in ops.cache_at_op.into_iter() { - self.get_cache(name).on_transaction_commit(op); + pub fn commit(&mut self, ops: DbCacheTransactionOps) -> ClientResult<()> { + for (name, ops) in ops.cache_at_ops.into_iter() { + self.get_cache(name)?.on_transaction_commit(ops); } if let Some(best_finalized_block) = ops.best_finalized_block { self.best_finalized_block = best_finalized_block; } + Ok(()) } /// Creates `ListCache` with the given name or returns a reference to the existing. - fn get_cache(&mut self, name: CacheKeyId) -> &mut ListCache, self::list_storage::DbStorage> { + pub(crate) fn get_cache( + &mut self, + name: CacheKeyId, + ) -> ClientResult<&mut ListCache, self::list_storage::DbStorage>> { get_cache_helper( &mut self.cache_at, name, &self.db, self.key_lookup_column, self.header_column, - self.authorities_column, + self.cache_column, &self.best_finalized_block ) } @@ -154,34 +174,49 @@ fn get_cache_helper<'a, Block: BlockT>( header: u32, cache: u32, best_finalized_block: &ComplexBlockId, -) -> &'a mut ListCache, self::list_storage::DbStorage> { - cache_at.entry(name).or_insert_with(|| { - ListCache::new( - self::list_storage::DbStorage::new(name.to_vec(), db.clone(), - self::list_storage::DbColumns { - meta: COLUMN_META, - key_lookup, - header, - cache, - }, - ), - cache_pruning_strategy(name), - best_finalized_block.clone(), - ) - }) +) -> ClientResult<&'a mut ListCache, self::list_storage::DbStorage>> { + match cache_at.entry(name) { + Entry::Occupied(entry) => Ok(entry.into_mut()), + Entry::Vacant(entry) => { + let cache = ListCache::new( + 
self::list_storage::DbStorage::new(name.to_vec(), db.clone(), + self::list_storage::DbColumns { + meta: COLUMN_META, + key_lookup, + header, + cache, + }, + ), + cache_pruning_strategy(name), + best_finalized_block.clone(), + )?; + Ok(entry.insert(cache)) + } + } } /// Cache operations that are to be committed after database transaction is committed. +#[derive(Default)] pub struct DbCacheTransactionOps { - cache_at_op: HashMap>>, + cache_at_ops: HashMap>>, best_finalized_block: Option>, } +impl DbCacheTransactionOps { + /// Empty transaction ops. + pub fn empty() -> DbCacheTransactionOps { + DbCacheTransactionOps { + cache_at_ops: HashMap::new(), + best_finalized_block: None, + } + } +} + /// Database-backed blockchain data cache transaction valid for single block import. pub struct DbCacheTransaction<'a, Block: BlockT> { cache: &'a mut DbCache, tx: &'a mut DBTransaction, - cache_at_op: HashMap>>, + cache_at_ops: HashMap>>, best_finalized_block: Option>, } @@ -189,7 +224,7 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { /// Convert transaction into post-commit operations set. 
pub fn into_ops(self) -> DbCacheTransactionOps { DbCacheTransactionOps { - cache_at_op: self.cache_at_op, + cache_at_ops: self.cache_at_ops, best_finalized_block: self.best_finalized_block, } } @@ -202,8 +237,6 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { data_at: HashMap>, entry_type: EntryType, ) -> ClientResult { - assert!(self.cache_at_op.is_empty()); - // prepare list of caches that are not update // (we might still need to do some cache maintenance in this case) let missed_caches = self.cache.cache_at.keys() @@ -212,8 +245,9 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { .collect::>(); let mut insert_op = |name: CacheKeyId, value: Option>| -> Result<(), sp_blockchain::Error> { - let cache = self.cache.get_cache(name); - let op = cache.on_block_insert( + let cache = self.cache.get_cache(name)?; + let cache_ops = self.cache_at_ops.entry(name).or_default(); + cache.on_block_insert( &mut self::list_storage::DbStorageTransaction::new( cache.storage(), &mut self.tx, @@ -222,10 +256,9 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { block.clone(), value, entry_type, + cache_ops, )?; - if let Some(op) = op { - self.cache_at_op.insert(name, op); - } + Ok(()) }; @@ -245,23 +278,19 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { pub fn on_block_finalize( mut self, parent: ComplexBlockId, - block: ComplexBlockId + block: ComplexBlockId, ) -> ClientResult { - assert!(self.cache_at_op.is_empty()); - - for (name, cache_at) in self.cache.cache_at.iter() { - let op = cache_at.on_block_finalize( + for (name, cache) in self.cache.cache_at.iter() { + let cache_ops = self.cache_at_ops.entry(*name).or_default(); + cache.on_block_finalize( &mut self::list_storage::DbStorageTransaction::new( - cache_at.storage(), + cache.storage(), &mut self.tx ), parent.clone(), block.clone(), + cache_ops, )?; - - if let Some(op) = op { - self.cache_at_op.insert(name.to_owned(), op); - } } self.best_finalized_block = Some(block); @@ -275,16 +304,15 @@ 
impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { reverted_block: &ComplexBlockId, ) -> ClientResult { for (name, cache) in self.cache.cache_at.iter() { - let op = cache.on_block_revert( + let cache_ops = self.cache_at_ops.entry(*name).or_default(); + cache.on_block_revert( &mut self::list_storage::DbStorageTransaction::new( cache.storage(), &mut self.tx ), reverted_block, + cache_ops, )?; - - assert!(!self.cache_at_op.contains_key(name)); - self.cache_at_op.insert(name.to_owned(), op); } Ok(self) @@ -310,7 +338,7 @@ impl BlockchainCache for DbCacheSync { )?; let tx_ops = tx.into_ops(); db.write(dbtx).map_err(db_err)?; - cache.commit(tx_ops); + cache.commit(tx_ops)?; Ok(()) } @@ -318,40 +346,38 @@ impl BlockchainCache for DbCacheSync { &self, key: &CacheKeyId, at: &BlockId, - ) -> Option<((NumberFor, Block::Hash), Option<(NumberFor, Block::Hash)>, Vec)> { + ) -> ClientResult, Block::Hash), Option<(NumberFor, Block::Hash)>, Vec)>> { let mut cache = self.0.write(); - let storage = cache.get_cache(*key).storage(); + let cache = cache.get_cache(*key)?; + let storage = cache.storage(); let db = storage.db(); let columns = storage.columns(); let at = match *at { BlockId::Hash(hash) => { - let header = utils::read_header::( + let header = utils::require_header::( &**db, columns.key_lookup, columns.header, - BlockId::Hash(hash.clone())).ok()??; + BlockId::Hash(hash.clone()))?; ComplexBlockId::new(hash, *header.number()) }, BlockId::Number(number) => { - let hash = utils::read_header::( + let hash = utils::require_header::( &**db, columns.key_lookup, columns.header, - BlockId::Number(number.clone())).ok()??.hash(); + BlockId::Number(number.clone()))?.hash(); ComplexBlockId::new(hash, number) }, }; - cache.cache_at - .get(key)? 
- .value_at_block(&at) + cache.value_at_block(&at) .map(|block_and_value| block_and_value.map(|(begin_block, end_block, value)| ( (begin_block.number, begin_block.hash), end_block.map(|end_block| (end_block.number, end_block.hash)), value, ))) - .ok()? } } diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs new file mode 100644 index 0000000000000..72163a5694213 --- /dev/null +++ b/client/db/src/changes_tries_storage.rs @@ -0,0 +1,1015 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! DB-backed changes tries storage. 
+ +use std::collections::{HashMap, HashSet}; +use std::sync::Arc; +use hash_db::Prefix; +use kvdb::{KeyValueDB, DBTransaction}; +use codec::{Decode, Encode}; +use parking_lot::RwLock; +use sp_blockchain::{Error as ClientError, Result as ClientResult}; +use sp_trie::MemoryDB; +use sc_client_api::backend::PrunableStateChangesTrieStorage; +use sp_blockchain::{well_known_cache_keys, Cache as BlockchainCache}; +use sp_core::{ChangesTrieConfiguration, ChangesTrieConfigurationRange, convert_hash}; +use sp_runtime::traits::{ + Block as BlockT, Header as HeaderT, HasherFor, NumberFor, One, Zero, CheckedSub, +}; +use sp_runtime::generic::{BlockId, DigestItem, ChangesTrieSignal}; +use sp_state_machine::{DBValue, ChangesTrieBuildCache, ChangesTrieCacheAction}; +use crate::utils::{self, Meta, meta_keys, db_err}; +use crate::cache::{ + DbCacheSync, DbCache, DbCacheTransactionOps, + ComplexBlockId, EntryType as CacheEntryType, +}; + +/// Extract new changes trie configuration (if available) from the header. +pub fn extract_new_configuration(header: &Header) -> Option<&Option> { + header.digest() + .log(DigestItem::as_changes_trie_signal) + .and_then(ChangesTrieSignal::as_new_configuration) +} + +/// Opaque configuration cache transaction. During its lifetime, no-one should modify cache. This is currently +/// guaranteed because import lock is held during block import/finalization. +pub struct DbChangesTrieStorageTransaction { + /// Cache operations that must be performed after db transaction is comitted. + cache_ops: DbCacheTransactionOps, + /// New configuration (if changed at current block). + new_config: Option>, +} + +impl DbChangesTrieStorageTransaction { + /// Consume self and return transaction with given new configuration. 
+ pub fn with_new_config(mut self, new_config: Option>) -> Self { + self.new_config = new_config; + self + } +} + +impl From> for DbChangesTrieStorageTransaction { + fn from(cache_ops: DbCacheTransactionOps) -> Self { + DbChangesTrieStorageTransaction { + cache_ops, + new_config: None, + } + } +} + +/// Changes tries storage. +/// +/// Stores all tries in separate DB column. +/// Lock order: meta, tries_meta, cache, build_cache. +pub struct DbChangesTrieStorage { + db: Arc, + meta_column: u32, + changes_tries_column: u32, + key_lookup_column: u32, + header_column: u32, + meta: Arc, Block::Hash>>>, + tries_meta: RwLock>, + min_blocks_to_keep: Option, + /// The cache stores all ever existing changes tries configurations. + cache: DbCacheSync, + /// Build cache is a map of block => set of storage keys changed at this block. + /// They're used to build digest blocks - instead of reading+parsing tries from db + /// we just use keys sets from the cache. + build_cache: RwLock>>, +} + +/// Persistent struct that contains all the changes tries metadata. +#[derive(Decode, Encode, Debug)] +struct ChangesTriesMeta { + /// Oldest unpruned max-level (or skewed) digest trie blocks range. + /// The range is inclusive from both sides. + /// Is None only if: + /// 1) we haven't yet finalized any blocks (except genesis) + /// 2) if best_finalized_block - min_blocks_to_keep points to the range where changes tries are disabled + /// 3) changes tries pruning is disabled + pub oldest_digest_range: Option<(NumberFor, NumberFor)>, + /// End block (inclusive) of oldest pruned max-level (or skewed) digest trie blocks range. + /// It is guaranteed that we have no any changes tries before (and including) this block. + /// It is guaranteed that all existing changes tries after this block are not yet pruned (if created). + pub oldest_pruned_digest_range_end: NumberFor, +} + +impl DbChangesTrieStorage { + /// Create new changes trie storage. 
+ pub fn new( + db: Arc, + meta_column: u32, + changes_tries_column: u32, + key_lookup_column: u32, + header_column: u32, + cache_column: u32, + meta: Arc, Block::Hash>>>, + min_blocks_to_keep: Option, + ) -> ClientResult { + let (finalized_hash, finalized_number, genesis_hash) = { + let meta = meta.read(); + (meta.finalized_hash, meta.finalized_number, meta.genesis_hash) + }; + let tries_meta = read_tries_meta(&*db, meta_column)?; + Ok(Self { + db: db.clone(), + meta_column, + changes_tries_column, + key_lookup_column, + header_column, + meta, + min_blocks_to_keep, + cache: DbCacheSync(RwLock::new(DbCache::new( + db.clone(), + key_lookup_column, + header_column, + cache_column, + genesis_hash, + ComplexBlockId::new(finalized_hash, finalized_number), + ))), + build_cache: RwLock::new(ChangesTrieBuildCache::new()), + tries_meta: RwLock::new(tries_meta), + }) + } + + /// Commit new changes trie. + pub fn commit( + &self, + tx: &mut DBTransaction, + mut changes_trie: MemoryDB>, + parent_block: ComplexBlockId, + block: ComplexBlockId, + new_header: &Block::Header, + finalized: bool, + new_configuration: Option>, + cache_tx: Option>, + ) -> ClientResult> { + // insert changes trie, associated with block, into DB + for (key, (val, _)) in changes_trie.drain() { + tx.put(self.changes_tries_column, key.as_ref(), &val); + } + + // if configuration has not been changed AND block is not finalized => nothing to do here + let new_configuration = match new_configuration { + Some(new_configuration) => new_configuration, + None if !finalized => return Ok(DbCacheTransactionOps::empty().into()), + None => return self.finalize( + tx, + parent_block.hash, + block.hash, + block.number, + Some(new_header), + cache_tx, + ), + }; + + // update configuration cache + let mut cache_at = HashMap::new(); + cache_at.insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_configuration.encode()); + Ok(DbChangesTrieStorageTransaction::from(match cache_tx { + Some(cache_tx) => self.cache.0.write() + 
.transaction_with_ops(tx, cache_tx.cache_ops) + .on_block_insert( + parent_block, + block, + cache_at, + if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal }, + )? + .into_ops(), + None => self.cache.0.write() + .transaction(tx) + .on_block_insert( + parent_block, + block, + cache_at, + if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal }, + )? + .into_ops(), + }).with_new_config(Some(new_configuration))) + } + + /// Called when block is finalized. + pub fn finalize( + &self, + tx: &mut DBTransaction, + parent_block_hash: Block::Hash, + block_hash: Block::Hash, + block_num: NumberFor, + new_header: Option<&Block::Header>, + cache_tx: Option>, + ) -> ClientResult> { + // prune obsolete changes tries + self.prune(tx, block_hash, block_num, new_header.clone(), cache_tx.as_ref())?; + + // if we have inserted the block that we're finalizing in the same transaction + // => then we have already finalized it from the commit() call + if cache_tx.is_some() { + if let Some(new_header) = new_header { + if new_header.hash() == block_hash { + return Ok(cache_tx.expect("guarded by cache_tx.is_some(); qed")); + } + } + } + + // and finalize configuration cache entries + let block = ComplexBlockId::new(block_hash, block_num); + let parent_block_num = block_num.checked_sub(&One::one()).unwrap_or_else(|| Zero::zero()); + let parent_block = ComplexBlockId::new(parent_block_hash, parent_block_num); + Ok(match cache_tx { + Some(cache_tx) => DbChangesTrieStorageTransaction::from( + self.cache.0.write() + .transaction_with_ops(tx, cache_tx.cache_ops) + .on_block_finalize( + parent_block, + block, + )? + .into_ops() + ).with_new_config(cache_tx.new_config), + None => DbChangesTrieStorageTransaction::from( + self.cache.0.write() + .transaction(tx) + .on_block_finalize( + parent_block, + block, + )? + .into_ops() + ), + }) + } + + /// When block is reverted. 
+ pub fn revert( + &self, + tx: &mut DBTransaction, + block: &ComplexBlockId, + ) -> ClientResult> { + Ok(self.cache.0.write().transaction(tx) + .on_block_revert(block)? + .into_ops() + .into()) + } + + /// When transaction has been committed. + pub fn post_commit(&self, tx: Option>) { + if let Some(tx) = tx { + self.cache.0.write().commit(tx.cache_ops) + .expect("only fails if cache with given name isn't loaded yet;\ + cache is already loaded because there is tx; qed"); + } + } + + /// Commit changes into changes trie build cache. + pub fn commit_build_cache(&self, cache_update: ChangesTrieCacheAction>) { + self.build_cache.write().perform(cache_update); + } + + /// Prune obsolete changes tries. + fn prune( + &self, + tx: &mut DBTransaction, + block_hash: Block::Hash, + block_num: NumberFor, + new_header: Option<&Block::Header>, + cache_tx: Option<&DbChangesTrieStorageTransaction>, + ) -> ClientResult<()> { + // never prune on archive nodes + let min_blocks_to_keep = match self.min_blocks_to_keep { + Some(min_blocks_to_keep) => min_blocks_to_keep, + None => return Ok(()), + }; + + let mut tries_meta = self.tries_meta.write(); + let mut next_digest_range_start = block_num; + loop { + // prune oldest digest if it is known + // it could be unknown if: + // 1) either we're finalizing block#1 + // 2) or we are (or were) in period where changes tries are disabled + if let Some((begin, end)) = tries_meta.oldest_digest_range { + if block_num <= end || block_num - end <= min_blocks_to_keep.into() { + break; + } + + tries_meta.oldest_pruned_digest_range_end = end; + sp_state_machine::prune_changes_tries( + &*self, + begin, + end, + &sp_state_machine::ChangesTrieAnchorBlockId { + hash: convert_hash(&block_hash), + number: block_num, + }, + |node| tx.delete(self.changes_tries_column, node.as_ref()), + ); + + next_digest_range_start = end + One::one(); + } + + // proceed to the next configuration range + let next_digest_range_start_hash = match block_num == 
next_digest_range_start { + true => block_hash, + false => utils::require_header::( + &*self.db, + self.key_lookup_column, + self.header_column, + BlockId::Number(next_digest_range_start), + )?.hash(), + }; + + let config_for_new_block = new_header + .map(|header| *header.number() == next_digest_range_start) + .unwrap_or(false); + let next_config = match cache_tx { + Some(cache_tx) if config_for_new_block && cache_tx.new_config.is_some() => { + let config = cache_tx + .new_config + .clone() + .expect("guarded by is_some(); qed"); + ChangesTrieConfigurationRange { + zero: (block_num, block_hash), + end: None, + config, + } + }, + _ if config_for_new_block => { + self.configuration_at(&BlockId::Hash(*new_header.expect( + "config_for_new_block is only true when new_header is passed; qed" + ).parent_hash()))? + }, + _ => self.configuration_at(&BlockId::Hash(next_digest_range_start_hash))?, + }; + if let Some(config) = next_config.config { + let mut oldest_digest_range = config + .next_max_level_digest_range(next_config.zero.0, next_digest_range_start) + .unwrap_or_else(|| (next_digest_range_start, next_digest_range_start)); + + if let Some(end) = next_config.end { + if end.0 < oldest_digest_range.1 { + oldest_digest_range.1 = end.0; + } + } + + tries_meta.oldest_digest_range = Some(oldest_digest_range); + continue; + } + + tries_meta.oldest_digest_range = None; + break; + } + + write_tries_meta(tx, self.meta_column, &*tries_meta); + Ok(()) + } +} + +impl PrunableStateChangesTrieStorage for DbChangesTrieStorage { + fn storage(&self) -> &dyn sp_state_machine::ChangesTrieStorage, NumberFor> { + self + } + + fn configuration_at(&self, at: &BlockId) -> ClientResult< + ChangesTrieConfigurationRange, Block::Hash> + > { + self.cache + .get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, at)? 
+ .and_then(|(zero, end, encoded)| Decode::decode(&mut &encoded[..]).ok() + .map(|config| ChangesTrieConfigurationRange { zero, end, config })) + .ok_or_else(|| ClientError::ErrorReadingChangesTriesConfig) + } + + fn oldest_pruned_digest_range_end(&self) -> NumberFor { + self.tries_meta.read().oldest_pruned_digest_range_end + } +} + +impl sp_state_machine::ChangesTrieRootsStorage, NumberFor> + for DbChangesTrieStorage +{ + fn build_anchor( + &self, + hash: Block::Hash, + ) -> Result>, String> { + utils::read_header::(&*self.db, self.key_lookup_column, self.header_column, BlockId::Hash(hash)) + .map_err(|e| e.to_string()) + .and_then(|maybe_header| maybe_header.map(|header| + sp_state_machine::ChangesTrieAnchorBlockId { + hash, + number: *header.number(), + } + ).ok_or_else(|| format!("Unknown header: {}", hash))) + } + + fn root( + &self, + anchor: &sp_state_machine::ChangesTrieAnchorBlockId>, + block: NumberFor, + ) -> Result, String> { + // check API requirement: we can't get NEXT block(s) based on anchor + if block > anchor.number { + return Err(format!("Can't get changes trie root at {} using anchor at {}", block, anchor.number)); + } + + // we need to get hash of the block to resolve changes trie root + let block_id = if block <= self.meta.read().finalized_number { + // if block is finalized, we could just read canonical hash + BlockId::Number(block) + } else { + // the block is not finalized + let mut current_num = anchor.number; + let mut current_hash: Block::Hash = convert_hash(&anchor.hash); + let maybe_anchor_header: Block::Header = utils::require_header::( + &*self.db, self.key_lookup_column, self.header_column, BlockId::Number(current_num) + ).map_err(|e| e.to_string())?; + if maybe_anchor_header.hash() == current_hash { + // if anchor is canonicalized, then the block is also canonicalized + BlockId::Number(block) + } else { + // else (block is not finalized + anchor is not canonicalized): + // => we should find the required block hash by traversing + 
// back from the anchor to the block with given number + while current_num != block { + let current_header: Block::Header = utils::require_header::( + &*self.db, self.key_lookup_column, self.header_column, BlockId::Hash(current_hash) + ).map_err(|e| e.to_string())?; + + current_hash = *current_header.parent_hash(); + current_num = current_num - One::one(); + } + + BlockId::Hash(current_hash) + } + }; + + Ok( + utils::require_header::( + &*self.db, + self.key_lookup_column, + self.header_column, + block_id, + ) + .map_err(|e| e.to_string())? + .digest() + .log(DigestItem::as_changes_trie_root) + .cloned() + ) + } +} + +impl sp_state_machine::ChangesTrieStorage, NumberFor> + for DbChangesTrieStorage +where + Block: BlockT, +{ + fn as_roots_storage(&self) -> &dyn sp_state_machine::ChangesTrieRootsStorage, NumberFor> { + self + } + + fn with_cached_changed_keys( + &self, + root: &Block::Hash, + functor: &mut dyn FnMut(&HashMap>, HashSet>>), + ) -> bool { + self.build_cache.read().with_changed_keys(root, functor) + } + + fn get(&self, key: &Block::Hash, _prefix: Prefix) -> Result, String> { + self.db.get(self.changes_tries_column, key.as_ref()) + .map_err(|err| format!("{}", err)) + } +} + +/// Read changes tries metadata from database. +fn read_tries_meta( + db: &dyn KeyValueDB, + meta_column: u32, +) -> ClientResult> { + match db.get(meta_column, meta_keys::CHANGES_TRIES_META).map_err(db_err)? { + Some(h) => match Decode::decode(&mut &h[..]) { + Ok(h) => Ok(h), + Err(err) => Err(ClientError::Backend(format!("Error decoding changes tries metadata: {}", err))), + }, + None => Ok(ChangesTriesMeta { + oldest_digest_range: None, + oldest_pruned_digest_range_end: Zero::zero(), + }), + } +} + +/// Write changes tries metadata from database. 
+fn write_tries_meta( + tx: &mut DBTransaction, + meta_column: u32, + meta: &ChangesTriesMeta, +) { + tx.put(meta_column, meta_keys::CHANGES_TRIES_META, &meta.encode()); +} + +#[cfg(test)] +mod tests { + use hash_db::EMPTY_PREFIX; + use sc_client_api::backend::{ + Backend as ClientBackend, NewBlockState, BlockImportOperation, PrunableStateChangesTrieStorage, + }; + use sp_blockchain::HeaderBackend as BlockchainHeaderBackend; + use sp_core::H256; + use sp_runtime::testing::{Digest, Header}; + use sp_runtime::traits::{Hash, BlakeTwo256}; + use sp_state_machine::{ChangesTrieRootsStorage, ChangesTrieStorage}; + use crate::Backend; + use crate::tests::{Block, insert_header, prepare_changes}; + use super::*; + + fn changes(number: u64) -> Option, Vec)>> { + Some(vec![(number.to_le_bytes().to_vec(), number.to_le_bytes().to_vec())]) + } + + fn insert_header_with_configuration_change( + backend: &Backend, + number: u64, + parent_hash: H256, + changes: Option, Vec)>>, + new_configuration: Option, + ) -> H256 { + let mut digest = Digest::default(); + let mut changes_trie_update = Default::default(); + if let Some(changes) = changes { + let (root, update) = prepare_changes(changes); + digest.push(DigestItem::ChangesTrieRoot(root)); + changes_trie_update = update; + } + digest.push(DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(new_configuration))); + + let header = Header { + number, + parent_hash, + state_root: BlakeTwo256::trie_root(Vec::new()), + digest, + extrinsics_root: Default::default(), + }; + let header_hash = header.hash(); + + let block_id = if number == 0 { + BlockId::Hash(Default::default()) + } else { + BlockId::Number(number - 1) + }; + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, block_id).unwrap(); + op.set_block_data(header, None, None, NewBlockState::Best).unwrap(); + op.update_changes_trie((changes_trie_update, ChangesTrieCacheAction::Clear)).unwrap(); + backend.commit_operation(op).unwrap(); 
+ + header_hash + } + + #[test] + fn changes_trie_storage_works() { + let backend = Backend::::new_test(1000, 100); + backend.changes_tries_storage.meta.write().finalized_number = 1000; + + let check_changes = |backend: &Backend, block: u64, changes: Vec<(Vec, Vec)>| { + let (changes_root, mut changes_trie_update) = prepare_changes(changes); + let anchor = sp_state_machine::ChangesTrieAnchorBlockId { + hash: backend.blockchain().header(BlockId::Number(block)).unwrap().unwrap().hash(), + number: block + }; + assert_eq!(backend.changes_tries_storage.root(&anchor, block), Ok(Some(changes_root))); + + let storage = backend.changes_tries_storage.storage(); + for (key, (val, _)) in changes_trie_update.drain() { + assert_eq!(storage.get(&key, EMPTY_PREFIX), Ok(Some(val))); + } + }; + + let changes0 = vec![(b"key_at_0".to_vec(), b"val_at_0".to_vec())]; + let changes1 = vec![ + (b"key_at_1".to_vec(), b"val_at_1".to_vec()), + (b"another_key_at_1".to_vec(), b"another_val_at_1".to_vec()), + ]; + let changes2 = vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())]; + + let block0 = insert_header(&backend, 0, Default::default(), Some(changes0.clone()), Default::default()); + let block1 = insert_header(&backend, 1, block0, Some(changes1.clone()), Default::default()); + let _ = insert_header(&backend, 2, block1, Some(changes2.clone()), Default::default()); + + // check that the storage contains tries for all blocks + check_changes(&backend, 0, changes0); + check_changes(&backend, 1, changes1); + check_changes(&backend, 2, changes2); + } + + #[test] + fn changes_trie_storage_works_with_forks() { + let backend = Backend::::new_test(1000, 100); + + let changes0 = vec![(b"k0".to_vec(), b"v0".to_vec())]; + let changes1 = vec![(b"k1".to_vec(), b"v1".to_vec())]; + let changes2 = vec![(b"k2".to_vec(), b"v2".to_vec())]; + let block0 = insert_header(&backend, 0, Default::default(), Some(changes0.clone()), Default::default()); + let block1 = insert_header(&backend, 1, block0, 
Some(changes1.clone()), Default::default()); + let block2 = insert_header(&backend, 2, block1, Some(changes2.clone()), Default::default()); + + let changes2_1_0 = vec![(b"k3".to_vec(), b"v3".to_vec())]; + let changes2_1_1 = vec![(b"k4".to_vec(), b"v4".to_vec())]; + let block2_1_0 = insert_header(&backend, 3, block2, Some(changes2_1_0.clone()), Default::default()); + let block2_1_1 = insert_header(&backend, 4, block2_1_0, Some(changes2_1_1.clone()), Default::default()); + + let changes2_2_0 = vec![(b"k5".to_vec(), b"v5".to_vec())]; + let changes2_2_1 = vec![(b"k6".to_vec(), b"v6".to_vec())]; + let block2_2_0 = insert_header(&backend, 3, block2, Some(changes2_2_0.clone()), Default::default()); + let block2_2_1 = insert_header(&backend, 4, block2_2_0, Some(changes2_2_1.clone()), Default::default()); + + // finalize block1 + backend.changes_tries_storage.meta.write().finalized_number = 1; + + // branch1: when asking for finalized block hash + let (changes1_root, _) = prepare_changes(changes1); + let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 }; + assert_eq!(backend.changes_tries_storage.root(&anchor, 1), Ok(Some(changes1_root))); + + // branch2: when asking for finalized block hash + let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_2_1, number: 4 }; + assert_eq!(backend.changes_tries_storage.root(&anchor, 1), Ok(Some(changes1_root))); + + // branch1: when asking for non-finalized block hash (search by traversal) + let (changes2_1_0_root, _) = prepare_changes(changes2_1_0); + let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 }; + assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_1_0_root))); + + // branch2: when asking for non-finalized block hash (search using canonicalized hint) + let (changes2_2_0_root, _) = prepare_changes(changes2_2_0); + let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_2_1, number: 4 }; + 
assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root))); + + // finalize first block of branch2 (block2_2_0) + backend.changes_tries_storage.meta.write().finalized_number = 3; + + // branch2: when asking for finalized block of this branch + assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root))); + + // branch1: when asking for finalized block of other branch + // => result is incorrect (returned for the block of branch1), but this is expected, + // because the other fork is abandoned (forked before finalized header) + let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 }; + assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root))); + } + + #[test] + fn changes_tries_are_pruned_on_finalization() { + let mut backend = Backend::::new_test(1000, 100); + backend.changes_tries_storage.min_blocks_to_keep = Some(8); + + let parent_hash = |number| { + if number == 0 { + Default::default() + } else { + backend.blockchain().header(BlockId::Number(number - 1)).unwrap().unwrap().hash() + } + }; + + let insert_regular_header = |with_changes, number| { + insert_header( + &backend, + number, + parent_hash(number), + if with_changes { changes(number) } else { None }, + Default::default(), + ); + }; + + let is_pruned = |number| { + let trie_root = backend + .blockchain() + .header(BlockId::Number(number)) + .unwrap().unwrap() + .digest() + .log(DigestItem::as_changes_trie_root) + .cloned(); + match trie_root { + Some(trie_root) => backend.changes_tries_storage.get(&trie_root, EMPTY_PREFIX).unwrap().is_none(), + None => true, + } + }; + + let finalize_block = |number| { + let header = backend.blockchain().header(BlockId::Number(number)).unwrap().unwrap(); + let mut tx = DBTransaction::new(); + let cache_ops = backend.changes_tries_storage.finalize( + &mut tx, + *header.parent_hash(), + header.hash(), + number, + None, + None, + ).unwrap(); + 
backend.storage.db.write(tx).unwrap(); + backend.changes_tries_storage.post_commit(Some(cache_ops)); + }; + + // configuration ranges: + // (0; 6] - None + // [7; 17] - Some(2^2): D2 is built at #10, #14; SD is built at #17 + // [18; 21] - None + // [22; 32] - Some(8^1): D1 is built at #29; SD is built at #32 + // [33; ... - Some(1) + let config_at_6 = Some(ChangesTrieConfiguration::new(2, 2)); + let config_at_17 = None; + let config_at_21 = Some(ChangesTrieConfiguration::new(8, 1)); + let config_at_32 = Some(ChangesTrieConfiguration::new(1, 0)); + + (0..6).for_each(|number| insert_regular_header(false, number)); + insert_header_with_configuration_change(&backend, 6, parent_hash(6), None, config_at_6); + (7..17).for_each(|number| insert_regular_header(true, number)); + insert_header_with_configuration_change(&backend, 17, parent_hash(17), changes(17), config_at_17); + (18..21).for_each(|number| insert_regular_header(false, number)); + insert_header_with_configuration_change(&backend, 21, parent_hash(21), None, config_at_21); + (22..32).for_each(|number| insert_regular_header(true, number)); + insert_header_with_configuration_change(&backend, 32, parent_hash(32), changes(32), config_at_32); + (33..50).for_each(|number| insert_regular_header(true, number)); + + // when only genesis is finalized, nothing is pruned + (0..=6).for_each(|number| assert!(is_pruned(number))); + (7..=17).for_each(|number| assert!(!is_pruned(number))); + (18..=21).for_each(|number| assert!(is_pruned(number))); + (22..50).for_each(|number| assert!(!is_pruned(number))); + + // when blocks [1; 18] are finalized, nothing is pruned + (1..=18).for_each(|number| finalize_block(number)); + (0..=6).for_each(|number| assert!(is_pruned(number))); + (7..=17).for_each(|number| assert!(!is_pruned(number))); + (18..=21).for_each(|number| assert!(is_pruned(number))); + (22..50).for_each(|number| assert!(!is_pruned(number))); + + // when block 19 is finalized, changes tries for blocks [7; 10] are pruned + 
finalize_block(19); + (0..=10).for_each(|number| assert!(is_pruned(number))); + (11..=17).for_each(|number| assert!(!is_pruned(number))); + (18..=21).for_each(|number| assert!(is_pruned(number))); + (22..50).for_each(|number| assert!(!is_pruned(number))); + + // when blocks [20; 22] are finalized, nothing is pruned + (20..=22).for_each(|number| finalize_block(number)); + (0..=10).for_each(|number| assert!(is_pruned(number))); + (11..=17).for_each(|number| assert!(!is_pruned(number))); + (18..=21).for_each(|number| assert!(is_pruned(number))); + (22..50).for_each(|number| assert!(!is_pruned(number))); + + // when block 23 is finalized, changes tries for blocks [11; 14] are pruned + finalize_block(23); + (0..=14).for_each(|number| assert!(is_pruned(number))); + (15..=17).for_each(|number| assert!(!is_pruned(number))); + (18..=21).for_each(|number| assert!(is_pruned(number))); + (22..50).for_each(|number| assert!(!is_pruned(number))); + + // when blocks [24; 25] are finalized, nothing is pruned + (24..=25).for_each(|number| finalize_block(number)); + (0..=14).for_each(|number| assert!(is_pruned(number))); + (15..=17).for_each(|number| assert!(!is_pruned(number))); + (18..=21).for_each(|number| assert!(is_pruned(number))); + (22..50).for_each(|number| assert!(!is_pruned(number))); + + // when block 26 is finalized, changes tries for blocks [15; 17] are pruned + finalize_block(26); + (0..=21).for_each(|number| assert!(is_pruned(number))); + (22..50).for_each(|number| assert!(!is_pruned(number))); + + // when blocks [27; 37] are finalized, nothing is pruned + (27..=37).for_each(|number| finalize_block(number)); + (0..=21).for_each(|number| assert!(is_pruned(number))); + (22..50).for_each(|number| assert!(!is_pruned(number))); + + // when block 38 is finalized, changes tries for blocks [22; 29] are pruned + finalize_block(38); + (0..=29).for_each(|number| assert!(is_pruned(number))); + (30..50).for_each(|number| assert!(!is_pruned(number))); + + // when blocks [39; 40] 
are finalized, nothing is pruned + (39..=40).for_each(|number| finalize_block(number)); + (0..=29).for_each(|number| assert!(is_pruned(number))); + (30..50).for_each(|number| assert!(!is_pruned(number))); + + // when block 41 is finalized, changes tries for blocks [30; 32] are pruned + finalize_block(41); + (0..=32).for_each(|number| assert!(is_pruned(number))); + (33..50).for_each(|number| assert!(!is_pruned(number))); + + // when block 42 is finalized, changes trie for block 33 is pruned + finalize_block(42); + (0..=33).for_each(|number| assert!(is_pruned(number))); + (34..50).for_each(|number| assert!(!is_pruned(number))); + + // when block 43 is finalized, changes trie for block 34 is pruned + finalize_block(43); + (0..=34).for_each(|number| assert!(is_pruned(number))); + (35..50).for_each(|number| assert!(!is_pruned(number))); + } + + #[test] + fn changes_tries_configuration_is_updated_on_block_insert() { + let backend = Backend::::new_test(1000, 100); + + // configurations at blocks + let config_at_1 = Some(ChangesTrieConfiguration { + digest_interval: 4, + digest_levels: 2, + }); + let config_at_3 = Some(ChangesTrieConfiguration { + digest_interval: 8, + digest_levels: 1, + }); + let config_at_5 = None; + let config_at_7 = Some(ChangesTrieConfiguration { + digest_interval: 8, + digest_levels: 1, + }); + + // insert some blocks + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); + let block1 = insert_header_with_configuration_change(&backend, 1, block0, None, config_at_1.clone()); + let block2 = insert_header(&backend, 2, block1, None, Default::default()); + let block3 = insert_header_with_configuration_change(&backend, 3, block2, None, config_at_3.clone()); + let block4 = insert_header(&backend, 4, block3, None, Default::default()); + let block5 = insert_header_with_configuration_change(&backend, 5, block4, None, config_at_5.clone()); + let block6 = insert_header(&backend, 6, block5, None, Default::default()); + let 
block7 = insert_header_with_configuration_change(&backend, 7, block6, None, config_at_7.clone()); + + // test configuration cache + let storage = &backend.changes_tries_storage; + assert_eq!( + storage.configuration_at(&BlockId::Hash(block1)).unwrap().config, + config_at_1.clone(), + ); + assert_eq!( + storage.configuration_at(&BlockId::Hash(block2)).unwrap().config, + config_at_1.clone(), + ); + assert_eq!( + storage.configuration_at(&BlockId::Hash(block3)).unwrap().config, + config_at_3.clone(), + ); + assert_eq!( + storage.configuration_at(&BlockId::Hash(block4)).unwrap().config, + config_at_3.clone(), + ); + assert_eq!( + storage.configuration_at(&BlockId::Hash(block5)).unwrap().config, + config_at_5.clone(), + ); + assert_eq!( + storage.configuration_at(&BlockId::Hash(block6)).unwrap().config, + config_at_5.clone(), + ); + assert_eq!( + storage.configuration_at(&BlockId::Hash(block7)).unwrap().config, + config_at_7.clone(), + ); + } + + #[test] + fn test_finalize_several_configuration_change_blocks_in_single_operation() { + let mut backend = Backend::::new_test(10, 10); + backend.changes_tries_storage.min_blocks_to_keep = Some(8); + + let configs = (0..=7).map(|i| Some(ChangesTrieConfiguration::new(2, i))).collect::>(); + + // insert unfinalized headers + let block0 = insert_header_with_configuration_change(&backend, 0, Default::default(), None, configs[0].clone()); + let block1 = insert_header_with_configuration_change(&backend, 1, block0, changes(1), configs[1].clone()); + let block2 = insert_header_with_configuration_change(&backend, 2, block1, changes(2), configs[2].clone()); + + let side_config2_1 = Some(ChangesTrieConfiguration::new(3, 2)); + let side_config2_2 = Some(ChangesTrieConfiguration::new(3, 3)); + let block2_1 = insert_header_with_configuration_change(&backend, 2, block1, changes(8), side_config2_1.clone()); + let _ = insert_header_with_configuration_change(&backend, 3, block2_1, changes(9), side_config2_2.clone()); + + // insert finalized 
header => 4 headers are finalized at once + let header3 = Header { + number: 3, + parent_hash: block2, + state_root: Default::default(), + digest: Digest { + logs: vec![ + DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(configs[3].clone())), + ], + }, + extrinsics_root: Default::default(), + }; + let block3 = header3.hash(); + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(block2)).unwrap(); + op.mark_finalized(BlockId::Hash(block1), None).unwrap(); + op.mark_finalized(BlockId::Hash(block2), None).unwrap(); + op.set_block_data(header3, None, None, NewBlockState::Final).unwrap(); + backend.commit_operation(op).unwrap(); + + // insert more unfinalized headers + let block4 = insert_header_with_configuration_change(&backend, 4, block3, changes(4), configs[4].clone()); + let block5 = insert_header_with_configuration_change(&backend, 5, block4, changes(5), configs[5].clone()); + let block6 = insert_header_with_configuration_change(&backend, 6, block5, changes(6), configs[6].clone()); + + // insert finalized header => 4 headers are finalized at once + let header7 = Header { + number: 7, + parent_hash: block6, + state_root: Default::default(), + digest: Digest { + logs: vec![ + DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(configs[7].clone())), + ], + }, + extrinsics_root: Default::default(), + }; + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(block6)).unwrap(); + op.mark_finalized(BlockId::Hash(block4), None).unwrap(); + op.mark_finalized(BlockId::Hash(block5), None).unwrap(); + op.mark_finalized(BlockId::Hash(block6), None).unwrap(); + op.set_block_data(header7, None, None, NewBlockState::Final).unwrap(); + backend.commit_operation(op).unwrap(); + } + + #[test] + fn changes_tries_configuration_is_reverted() { + let backend = Backend::::new_test(10, 10); + + let config0 = Some(ChangesTrieConfiguration::new(2, 5)); + let 
block0 = insert_header_with_configuration_change(&backend, 0, Default::default(), None, config0); + let config1 = Some(ChangesTrieConfiguration::new(2, 6)); + let block1 = insert_header_with_configuration_change(&backend, 1, block0, changes(0), config1); + backend.finalize_block(BlockId::Number(1), Some(vec![42])).unwrap(); + let config2 = Some(ChangesTrieConfiguration::new(2, 7)); + let block2 = insert_header_with_configuration_change(&backend, 2, block1, changes(1), config2); + let config2_1 = Some(ChangesTrieConfiguration::new(2, 8)); + let _ = insert_header_with_configuration_change(&backend, 3, block2, changes(10), config2_1); + let config2_2 = Some(ChangesTrieConfiguration::new(2, 9)); + let block2_2 = insert_header_with_configuration_change(&backend, 3, block2, changes(20), config2_2); + let config2_3 = Some(ChangesTrieConfiguration::new(2, 10)); + let _ = insert_header_with_configuration_change(&backend, 4, block2_2, changes(30), config2_3); + + // before truncate there are 2 unfinalized forks - block2_1+block2_3 + assert_eq!( + backend.changes_tries_storage.cache.0.write() + .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) + .unwrap() + .unfinalized() + .iter() + .map(|fork| fork.head().valid_from.number) + .collect::>(), + vec![3, 4], + ); + + // after truncating block2_3 - there are 2 unfinalized forks - block2_1+block2_2 + backend.revert(1, false).unwrap(); + assert_eq!( + backend.changes_tries_storage.cache.0.write() + .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) + .unwrap() + .unfinalized() + .iter() + .map(|fork| fork.head().valid_from.number) + .collect::>(), + vec![3, 3], + ); + + // after truncating block2_1 && block2_2 - there are still two unfinalized forks (cache impl specifics), + // the 1st one points to the block #3 because it isn't truncated + backend.revert(1, false).unwrap(); + assert_eq!( + backend.changes_tries_storage.cache.0.write() + .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) + .unwrap() + .unfinalized() + 
.iter() + .map(|fork| fork.head().valid_from.number) + .collect::>(), + vec![3, 2], + ); + + // after truncating block2 - there are no unfinalized forks + backend.revert(1, false).unwrap(); + assert!( + backend.changes_tries_storage.cache.0.write() + .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) + .unwrap() + .unfinalized() + .iter() + .map(|fork| fork.head().valid_from.number) + .collect::>() + .is_empty(), + ); + } +} diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 06d0dc18ffa36..be569194972cc 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -31,18 +31,21 @@ pub mod offchain; mod children; mod cache; +mod changes_tries_storage; mod storage_cache; +#[cfg(any(feature = "kvdb-rocksdb", test))] +mod upgrade; mod utils; mod stats; use std::sync::Arc; use std::path::PathBuf; use std::io; -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use sc_client_api::{execution_extensions::ExecutionExtensions, ForkBlocks, UsageInfo, MemoryInfo, BadBlocks, IoInfo}; use sc_client_api::backend::NewBlockState; -use sc_client_api::backend::{StorageCollection, ChildStorageCollection}; +use sc_client_api::backend::PrunableStateChangesTrieStorage; use sp_blockchain::{ Result as ClientResult, Error as ClientError, well_known_cache_keys, HeaderBackend, @@ -51,11 +54,11 @@ use codec::{Decode, Encode}; use hash_db::Prefix; use kvdb::{KeyValueDB, DBTransaction}; use sp_trie::{MemoryDB, PrefixedMemoryDB, prefixed_key}; -use parking_lot::{Mutex, RwLock}; -use sp_core::{ChangesTrieConfiguration, convert_hash, traits::CodeExecutor}; +use parking_lot::RwLock; +use sp_core::{ChangesTrieConfiguration, traits::CodeExecutor}; use sp_core::storage::{well_known_keys, ChildInfo}; use sp_runtime::{ - generic::{BlockId, DigestItem}, Justification, Storage, + generic::BlockId, Justification, Storage, BuildStorage, }; use sp_runtime::traits::{ @@ -63,10 +66,12 @@ use sp_runtime::traits::{ }; use sc_executor::RuntimeInfo; use sp_state_machine::{ - 
DBValue, ChangesTrieTransaction, ChangesTrieCacheAction, ChangesTrieBuildCache, - backend::Backend as StateBackend, UsageInfo as StateUsageInfo, + DBValue, ChangesTrieTransaction, ChangesTrieCacheAction, UsageInfo as StateUsageInfo, + StorageCollection, ChildStorageCollection, + backend::Backend as StateBackend, }; -use crate::utils::{Meta, db_err, meta_keys, read_db, read_meta}; +use crate::utils::{DatabaseType, Meta, db_err, meta_keys, read_db, read_meta}; +use crate::changes_tries_storage::{DbChangesTrieStorage, DbChangesTrieStorageTransaction}; use sc_client::leaves::{LeafSet, FinalizationDisplaced}; use sc_state_db::StateDb; use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata, HeaderMetadataCache}; @@ -274,7 +279,7 @@ pub enum DatabaseSettingsSrc { pub fn new_client( settings: DatabaseSettings, executor: E, - genesis_storage: S, + genesis_storage: &S, fork_blocks: ForkBlocks, bad_blocks: BadBlocks, execution_extensions: ExecutionExtensions, @@ -322,6 +327,7 @@ pub(crate) mod columns { pub const AUX: u32 = 8; /// Offchain workers local storage pub const OFFCHAIN: u32 = 9; + pub const CACHE: u32 = 10; } struct PendingBlock { @@ -352,7 +358,7 @@ pub struct BlockchainDb { impl BlockchainDb { fn new(db: Arc) -> ClientResult { - let meta = read_meta::(&*db, columns::META, columns::HEADER)?; + let meta = read_meta::(&*db, columns::HEADER)?; let leaves = LeafSet::read_from_db(&*db, columns::META, meta_keys::LEAF_PREFIX)?; Ok(BlockchainDb { db, @@ -511,7 +517,8 @@ pub struct BlockImportOperation { storage_updates: StorageCollection, child_storage_updates: ChildStorageCollection, changes_trie_updates: MemoryDB>, - changes_trie_cache_update: Option>>, + changes_trie_build_cache_update: Option>>, + changes_trie_config_update: Option>, pending_block: Option>, aux_ops: Vec<(Vec, Option>)>, finalized_blocks: Vec<(BlockId, Option)>, @@ -545,6 +552,9 @@ impl sc_client_api::backend::BlockImportOperation for Bloc leaf_state: NewBlockState, ) -> ClientResult<()> { 
assert!(self.pending_block.is_none(), "Only one block per operation is allowed"); + if let Some(changes_trie_config_update) = changes_tries_storage::extract_new_configuration(&header) { + self.changes_trie_config_update = Some(changes_trie_config_update.clone()); + } self.pending_block = Some(PendingBlock { header, body, @@ -583,12 +593,22 @@ impl sc_client_api::backend::BlockImportOperation for Bloc child_content.data.into_iter().map(|(k, v)| (k, Some(v))), child_content.child_info), ); + let mut changes_trie_config: Option = None; let (root, transaction) = self.old_state.full_storage_root( - storage.top.into_iter().map(|(k, v)| (k, Some(v))), + storage.top.into_iter().map(|(k, v)| { + if k == well_known_keys::CHANGES_TRIE_CONFIG { + changes_trie_config = Some( + Decode::decode(&mut &v[..]) + .expect("changes trie configuration is encoded properly at genesis") + ); + } + (k, Some(v)) + }), child_delta ); self.db_updates = transaction; + self.changes_trie_config_update = Some(changes_trie_config); self.commit_state = true; Ok(root) } @@ -598,7 +618,7 @@ impl sc_client_api::backend::BlockImportOperation for Bloc update: ChangesTrieTransaction, NumberFor>, ) -> ClientResult<()> { self.changes_trie_updates = update.0; - self.changes_trie_cache_update = Some(update.1); + self.changes_trie_build_cache_update = Some(update.1); Ok(()) } @@ -674,174 +694,6 @@ impl sp_state_machine::Storage> for DbGenesisSto } } -/// A database wrapper for changes tries. -pub struct DbChangesTrieStorage { - db: Arc, - meta: Arc, Block::Hash>>>, - min_blocks_to_keep: Option, - cache: RwLock>>, -} - -impl DbChangesTrieStorage { - /// Commit new changes trie. - pub fn commit(&self, tx: &mut DBTransaction, mut changes_trie: MemoryDB>) { - for (key, (val, _)) in changes_trie.drain() { - tx.put(columns::CHANGES_TRIE, key.as_ref(), &val); - } - } - - /// Commit changes into changes trie build cache. 
- pub fn commit_cache(&self, cache_update: ChangesTrieCacheAction>) { - self.cache.write().perform(cache_update); - } - - /// Prune obsolete changes tries. - pub fn prune( - &self, - config: &ChangesTrieConfiguration, - tx: &mut DBTransaction, - block_hash: Block::Hash, - block_num: NumberFor, - ) { - // never prune on archive nodes - let min_blocks_to_keep = match self.min_blocks_to_keep { - Some(min_blocks_to_keep) => min_blocks_to_keep, - None => return, - }; - - sp_state_machine::prune_changes_tries( - config, - &*self, - min_blocks_to_keep.into(), - &sp_state_machine::ChangesTrieAnchorBlockId { - hash: convert_hash(&block_hash), - number: block_num, - }, - |node| tx.delete(columns::CHANGES_TRIE, node.as_ref())); - } -} - -impl sc_client_api::backend::PrunableStateChangesTrieStorage - for DbChangesTrieStorage -{ - fn oldest_changes_trie_block( - &self, - config: &ChangesTrieConfiguration, - best_finalized_block: NumberFor, - ) -> NumberFor { - match self.min_blocks_to_keep { - Some(min_blocks_to_keep) => sp_state_machine::oldest_non_pruned_changes_trie( - config, - min_blocks_to_keep.into(), - best_finalized_block, - ), - None => One::one(), - } - } -} - -impl sp_state_machine::ChangesTrieRootsStorage, NumberFor> - for DbChangesTrieStorage -{ - fn build_anchor( - &self, - hash: Block::Hash, - ) -> Result>, String> { - utils::read_header::( - &*self.db, - columns::KEY_LOOKUP, - columns::HEADER, - BlockId::Hash(hash), - ) - .map_err(|e| e.to_string()) - .and_then(|maybe_header| maybe_header.map(|header| - sp_state_machine::ChangesTrieAnchorBlockId { - hash, - number: *header.number(), - } - ).ok_or_else(|| format!("Unknown header: {}", hash))) - } - - fn root( - &self, - anchor: &sp_state_machine::ChangesTrieAnchorBlockId>, - block: NumberFor, - ) -> Result, String> { - // check API requirement: we can't get NEXT block(s) based on anchor - if block > anchor.number { - return Err( - format!("Can't get changes trie root at {} using anchor at {}", block, 
anchor.number) - ) - } - - // we need to get hash of the block to resolve changes trie root - let block_id = if block <= self.meta.read().finalized_number { - // if block is finalized, we could just read canonical hash - BlockId::Number(block) - } else { - // the block is not finalized - let mut current_num = anchor.number; - let mut current_hash: Block::Hash = convert_hash(&anchor.hash); - let maybe_anchor_header: Block::Header = utils::require_header::( - &*self.db, columns::KEY_LOOKUP, columns::HEADER, BlockId::Number(current_num) - ).map_err(|e| e.to_string())?; - if maybe_anchor_header.hash() == current_hash { - // if anchor is canonicalized, then the block is also canonicalized - BlockId::Number(block) - } else { - // else (block is not finalized + anchor is not canonicalized): - // => we should find the required block hash by traversing - // back from the anchor to the block with given number - while current_num != block { - let current_header: Block::Header = utils::require_header::( - &*self.db, columns::KEY_LOOKUP, columns::HEADER, BlockId::Hash(current_hash) - ).map_err(|e| e.to_string())?; - - current_hash = *current_header.parent_hash(); - current_num = current_num - One::one(); - } - - BlockId::Hash(current_hash) - } - }; - - Ok( - utils::require_header::( - &*self.db, - columns::KEY_LOOKUP, - columns::HEADER, - block_id, - ) - .map_err(|e| e.to_string())? 
- .digest() - .log(DigestItem::as_changes_trie_root) - .cloned() - ) - } -} - -impl sp_state_machine::ChangesTrieStorage, NumberFor> - for DbChangesTrieStorage -{ - fn as_roots_storage(&self) - -> &dyn sp_state_machine::ChangesTrieRootsStorage, NumberFor> - { - self - } - - fn with_cached_changed_keys( - &self, - root: &Block::Hash, - functor: &mut dyn FnMut(&HashMap>, HashSet>>), - ) -> bool { - self.cache.read().with_changed_keys(root, functor) - } - - fn get(&self, key: &Block::Hash, _prefix: Prefix) -> Result, String> { - self.db.get(columns::CHANGES_TRIE, key.as_ref()).map_err(|err| format!("{}", err)) - } -} - /// Frozen `value` at time `at`. /// /// Used as inner structure under lock in `FrozenForDuration`. @@ -889,9 +741,6 @@ pub struct Backend { storage: Arc>, offchain_storage: offchain::LocalStorage, changes_tries_storage: DbChangesTrieStorage, - /// None<*> means that the value hasn't been cached yet. Some(*) means that the value (either None or - /// Some(*)) has been cached and is valid. - changes_trie_config: Mutex>>, blockchain: BlockchainDb, canonicalization_delay: u64, shared_cache: SharedCache, @@ -906,7 +755,7 @@ impl Backend { /// /// The pruning window is how old a block must be before the state is pruned. 
pub fn new(config: DatabaseSettings, canonicalization_delay: u64) -> ClientResult { - let db = crate::utils::open_database(&config, columns::META, "full")?; + let db = crate::utils::open_database::(&config, DatabaseType::Full)?; Self::from_kvdb(db as Arc<_>, canonicalization_delay, &config) } @@ -942,22 +791,25 @@ impl Backend { state_db, }; let offchain_storage = offchain::LocalStorage::new(db.clone()); - let changes_tries_storage = DbChangesTrieStorage { + let changes_tries_storage = DbChangesTrieStorage::new( db, + columns::META, + columns::CHANGES_TRIE, + columns::KEY_LOOKUP, + columns::HEADER, + columns::CACHE, meta, - min_blocks_to_keep: if is_archive_pruning { + if is_archive_pruning { None } else { Some(MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR) }, - cache: RwLock::new(ChangesTrieBuildCache::new()), - }; + )?; Ok(Backend { storage: Arc::new(storage_db), offchain_storage, changes_tries_storage, - changes_trie_config: Mutex::new(None), blockchain, canonicalization_delay, shared_cache: new_shared_cache( @@ -1026,26 +878,6 @@ impl Backend { self.blockchain.db.iter(columns::HEADER).count() as u64 } - /// Read (from storage or cache) changes trie config. - /// - /// Currently changes tries configuration is set up once (at genesis) and could not - /// be changed. Thus, we'll actually read value once and then just use cached value. - fn changes_trie_config(&self, block: Block::Hash) -> ClientResult> { - let mut cached_changes_trie_config = self.changes_trie_config.lock(); - match cached_changes_trie_config.clone() { - Some(cached_changes_trie_config) => Ok(cached_changes_trie_config), - None => { - use sc_client_api::backend::Backend; - let changes_trie_config = self - .state_at(BlockId::Hash(block))? - .storage(well_known_keys::CHANGES_TRIE_CONFIG)? - .and_then(|v| Decode::decode(&mut &*v).ok()); - *cached_changes_trie_config = Some(changes_trie_config.clone()); - Ok(changes_trie_config) - }, - } - } - /// Handle setting head within a transaction. 
`route_to` should be the last /// block that existed in the database. `best_to` should be the best block /// to be set. @@ -1137,6 +969,7 @@ impl Backend { header: &Block::Header, last_finalized: Option, justification: Option, + changes_trie_cache_ops: &mut Option>, finalization_displaced: &mut Option>>, ) -> ClientResult<(Block::Hash, ::Number, bool, bool)> { // TODO: ensure best chain contains this block. @@ -1144,8 +977,10 @@ impl Backend { self.ensure_sequential_finalization(header, last_finalized)?; self.note_finalized( transaction, + false, header, *hash, + changes_trie_cache_ops, finalization_displaced, )?; @@ -1204,6 +1039,7 @@ impl Backend { let mut meta_updates = Vec::with_capacity(operation.finalized_blocks.len()); let mut last_finalized_hash = self.blockchain.meta.read().finalized_hash; + let mut changes_trie_cache_ops = None; for (block, justification) in operation.finalized_blocks { let block_hash = self.blockchain.expect_block_hash_from_id(&block)?; let block_header = self.blockchain.expect_header(BlockId::Hash(block_hash))?; @@ -1214,6 +1050,7 @@ impl Backend { &block_header, Some(last_finalized_hash), justification, + &mut changes_trie_cache_ops, &mut finalization_displaced_leaves, )?); last_finalized_hash = block_hash; @@ -1257,6 +1094,11 @@ impl Backend { if number.is_zero() { transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, &lookup_key); transaction.put(columns::META, meta_keys::GENESIS_HASH, hash.as_ref()); + + // for tests, because config is set from within the reset_storage + if operation.changes_trie_config_update.is_none() { + operation.changes_trie_config_update = Some(None); + } } let finalized = if operation.commit_state { @@ -1293,8 +1135,21 @@ impl Backend { let header = &pending_block.header; let is_best = pending_block.leaf_state.is_best(); let changes_trie_updates = operation.changes_trie_updates; - - self.changes_tries_storage.commit(&mut transaction, changes_trie_updates); + let changes_trie_config_update = 
operation.changes_trie_config_update; + changes_trie_cache_ops = Some(self.changes_tries_storage.commit( + &mut transaction, + changes_trie_updates, + cache::ComplexBlockId::new( + *header.parent_hash(), + if number.is_zero() { Zero::zero() } else { number - One::one() }, + ), + cache::ComplexBlockId::new(hash, number), + header, + finalized, + changes_trie_config_update, + changes_trie_cache_ops, + )?); + self.state_usage.merge_sm(operation.old_state.usage_info()); let cache = operation.old_state.release(); // release state reference so that it can be finalized if finalized { @@ -1302,8 +1157,10 @@ impl Backend { self.ensure_sequential_finalization(header, Some(last_finalized_hash))?; self.note_finalized( &mut transaction, + true, header, hash, + &mut changes_trie_cache_ops, &mut finalization_displaced_leaves, )?; } else { @@ -1353,11 +1210,15 @@ impl Backend { let write_result = self.storage.db.write(transaction).map_err(db_err); - if let Some(changes_trie_cache_update) = operation.changes_trie_cache_update { - self.changes_tries_storage.commit_cache(changes_trie_cache_update); - } - - if let Some((number, hash, enacted, retracted, displaced_leaf, is_best, mut cache)) = imported { + if let Some(( + number, + hash, + enacted, + retracted, + displaced_leaf, + is_best, + mut cache, + )) = imported { if let Err(e) = write_result { let mut leaves = self.blockchain.leaves.write(); let mut undo = leaves.undo(); @@ -1383,6 +1244,11 @@ impl Backend { ); } + if let Some(changes_trie_build_cache_update) = operation.changes_trie_build_cache_update { + self.changes_tries_storage.commit_build_cache(changes_trie_build_cache_update); + } + self.changes_tries_storage.post_commit(changes_trie_cache_ops); + if let Some((enacted, retracted)) = cache_update { self.shared_cache.lock().sync(&enacted, &retracted); } @@ -1401,15 +1267,15 @@ impl Backend { fn note_finalized( &self, transaction: &mut DBTransaction, + is_inserted: bool, f_header: &Block::Header, f_hash: Block::Hash, + 
changes_trie_cache_ops: &mut Option>, displaced: &mut Option>> ) -> ClientResult<()> { let f_num = f_header.number().clone(); if self.storage.state_db.best_canonical().map(|c| f_num.saturated_into::() > c).unwrap_or(true) { - let parent_hash = f_header.parent_hash().clone(); - let lookup_key = utils::number_and_hash_to_lookup_key(f_num, f_hash.clone())?; transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, &lookup_key); @@ -1417,9 +1283,16 @@ impl Backend { .map_err(|e: sc_state_db::Error| sp_blockchain::Error::from(format!("State database error: {:?}", e)))?; apply_state_commit(transaction, commit); - let changes_trie_config = self.changes_trie_config(parent_hash)?; - if let Some(changes_trie_config) = changes_trie_config { - self.changes_tries_storage.prune(&changes_trie_config, transaction, f_hash, f_num); + if !f_num.is_zero() { + let new_changes_trie_cache_ops = self.changes_tries_storage.finalize( + transaction, + *f_header.parent_hash(), + f_hash, + f_num, + if is_inserted { Some(&f_header) } else { None }, + changes_trie_cache_ops.take(), + )?; + *changes_trie_cache_ops = Some(new_changes_trie_cache_ops); } } @@ -1476,7 +1349,6 @@ impl sc_client_api::backend::Backend for Backend { type BlockImportOperation = BlockImportOperation; type Blockchain = BlockchainDb; type State = CachingState, Block>; - type ChangesTrieStorage = DbChangesTrieStorage; type OffchainStorage = offchain::LocalStorage; fn begin_operation(&self) -> ClientResult { @@ -1487,8 +1359,9 @@ impl sc_client_api::backend::Backend for Backend { db_updates: PrefixedMemoryDB::default(), storage_updates: Default::default(), child_storage_updates: Default::default(), + changes_trie_config_update: None, changes_trie_updates: MemoryDB::default(), - changes_trie_cache_update: None, + changes_trie_build_cache_update: None, aux_ops: Vec::new(), finalized_blocks: Vec::new(), set_head: None, @@ -1532,16 +1405,19 @@ impl sc_client_api::backend::Backend for Backend { let header = 
self.blockchain.expect_header(block)?; let mut displaced = None; let commit = |displaced| { + let mut changes_trie_cache_ops = None; let (hash, number, is_best, is_finalized) = self.finalize_block_with_transaction( &mut transaction, &hash, &header, None, justification, + &mut changes_trie_cache_ops, displaced, )?; self.storage.db.write(transaction).map_err(db_err)?; self.blockchain.update_meta(hash, number, is_best, is_finalized); + self.changes_tries_storage.post_commit(changes_trie_cache_ops); Ok(()) }; match commit(&mut displaced) { @@ -1557,7 +1433,7 @@ impl sc_client_api::backend::Backend for Backend { Ok(()) } - fn changes_trie_storage(&self) -> Option<&Self::ChangesTrieStorage> { + fn changes_trie_storage(&self) -> Option<&dyn PrunableStateChangesTrieStorage> { Some(&self.changes_tries_storage) } @@ -1616,6 +1492,7 @@ impl sc_client_api::backend::Backend for Backend { match self.storage.state_db.revert_one() { Some(commit) => { apply_state_commit(&mut transaction, commit); + let removed_number = best_number; let removed = self.blockchain.header(BlockId::Number(best_number))?.ok_or_else( || sp_blockchain::Error::UnknownBlock( format!("Error reverting to {}. 
Block hash not found.", best_number)))?; @@ -1628,6 +1505,13 @@ impl sc_client_api::backend::Backend for Backend { let update_finalized = best_number < finalized; let key = utils::number_and_hash_to_lookup_key(best_number.clone(), &best_hash)?; + let changes_trie_cache_ops = self.changes_tries_storage.revert( + &mut transaction, + &cache::ComplexBlockId::new( + removed.hash(), + removed_number, + ), + )?; transaction.put(columns::META, meta_keys::BEST_BLOCK, &key); if update_finalized { transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, &key); @@ -1635,6 +1519,7 @@ impl sc_client_api::backend::Backend for Backend { transaction.delete(columns::KEY_LOOKUP, removed.hash().as_ref()); children::remove_children(&mut transaction, columns::META, meta_keys::CHILDREN_PREFIX, best_hash); self.storage.db.write(transaction).map_err(db_err)?; + self.changes_tries_storage.post_commit(Some(changes_trie_cache_ops)); self.blockchain.update_meta(best_hash, best_number, true, update_finalized); } None => return Ok(c.saturated_into::>()) @@ -1735,6 +1620,7 @@ impl sc_client_api::backend::Backend for Backend { } fn destroy_state(&self, state: Self::State) -> ClientResult<()> { + self.state_usage.merge_sm(state.usage_info()); if let Some(hash) = state.cache.parent_hash.clone() { let is_best = self.blockchain.meta.read().best_hash == hash; state.release().sync_cache(&[], &[], vec![], vec![], None, None, is_best); @@ -1749,14 +1635,8 @@ impl sc_client_api::backend::Backend for Backend { impl sc_client_api::backend::LocalBackend for Backend {} -/// TODO: remove me in #3201 -pub fn unused_sink(cache_tx: crate::cache::DbCacheTransaction) { - cache_tx.on_block_revert(&crate::cache::ComplexBlockId::new(Default::default(), 0.into())).unwrap(); - unimplemented!() -} - #[cfg(test)] -mod tests { +pub(crate) mod tests { use hash_db::{HashDB, EMPTY_PREFIX}; use super::*; use crate::columns; @@ -1765,12 +1645,13 @@ mod tests { use sc_client::blockchain::Backend as BLBTrait; use 
sp_runtime::testing::{Header, Block as RawBlock, ExtrinsicWrapper}; use sp_runtime::traits::{Hash, BlakeTwo256}; - use sp_state_machine::{TrieMut, TrieDBMut, ChangesTrieRootsStorage, ChangesTrieStorage}; + use sp_runtime::generic::DigestItem; + use sp_state_machine::{TrieMut, TrieDBMut}; use sp_blockchain::{lowest_common_ancestor, tree_route}; - type Block = RawBlock>; + pub(crate) type Block = RawBlock>; - fn prepare_changes(changes: Vec<(Vec, Vec)>) -> (H256, MemoryDB) { + pub fn prepare_changes(changes: Vec<(Vec, Vec)>) -> (H256, MemoryDB) { let mut changes_root = H256::default(); let mut changes_trie_update = MemoryDB::::default(); { @@ -1786,20 +1667,22 @@ mod tests { (changes_root, changes_trie_update) } - fn insert_header( + pub fn insert_header( backend: &Backend, number: u64, parent_hash: H256, - changes: Vec<(Vec, Vec)>, + changes: Option, Vec)>>, extrinsics_root: H256, ) -> H256 { use sp_runtime::testing::Digest; - let (changes_root, changes_trie_update) = prepare_changes(changes); - let digest = Digest { - logs: vec![ - DigestItem::ChangesTrieRoot(changes_root), - ], - }; + + let mut digest = Digest::default(); + let mut changes_trie_update = Default::default(); + if let Some(changes) = changes { + let (root, update) = prepare_changes(changes); + digest.push(DigestItem::ChangesTrieRoot(root)); + changes_trie_update = update; + } let header = Header { number, parent_hash, @@ -2126,238 +2009,20 @@ mod tests { ).unwrap().is_none()); } - #[test] - fn changes_trie_storage_works() { - let backend = Backend::::new_test(1000, 100); - backend.changes_tries_storage.meta.write().finalized_number = 1000; - - - let check_changes = |backend: &Backend, block: u64, changes: Vec<(Vec, Vec)>| { - let (changes_root, mut changes_trie_update) = prepare_changes(changes); - let anchor = sp_state_machine::ChangesTrieAnchorBlockId { - hash: backend.blockchain().header(BlockId::Number(block)).unwrap().unwrap().hash(), - number: block - }; - 
assert_eq!(backend.changes_tries_storage.root(&anchor, block), Ok(Some(changes_root))); - - for (key, (val, _)) in changes_trie_update.drain() { - assert_eq!(backend.changes_trie_storage().unwrap().get(&key, EMPTY_PREFIX), Ok(Some(val))); - } - }; - - let changes0 = vec![(b"key_at_0".to_vec(), b"val_at_0".to_vec())]; - let changes1 = vec![ - (b"key_at_1".to_vec(), b"val_at_1".to_vec()), - (b"another_key_at_1".to_vec(), b"another_val_at_1".to_vec()), - ]; - let changes2 = vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())]; - - let block0 = insert_header(&backend, 0, Default::default(), changes0.clone(), Default::default()); - let block1 = insert_header(&backend, 1, block0, changes1.clone(), Default::default()); - let _ = insert_header(&backend, 2, block1, changes2.clone(), Default::default()); - - // check that the storage contains tries for all blocks - check_changes(&backend, 0, changes0); - check_changes(&backend, 1, changes1); - check_changes(&backend, 2, changes2); - } - - #[test] - fn changes_trie_storage_works_with_forks() { - let backend = Backend::::new_test(1000, 100); - - let changes0 = vec![(b"k0".to_vec(), b"v0".to_vec())]; - let changes1 = vec![(b"k1".to_vec(), b"v1".to_vec())]; - let changes2 = vec![(b"k2".to_vec(), b"v2".to_vec())]; - let block0 = insert_header(&backend, 0, Default::default(), changes0.clone(), Default::default()); - let block1 = insert_header(&backend, 1, block0, changes1.clone(), Default::default()); - let block2 = insert_header(&backend, 2, block1, changes2.clone(), Default::default()); - - let changes2_1_0 = vec![(b"k3".to_vec(), b"v3".to_vec())]; - let changes2_1_1 = vec![(b"k4".to_vec(), b"v4".to_vec())]; - let block2_1_0 = insert_header(&backend, 3, block2, changes2_1_0.clone(), Default::default()); - let block2_1_1 = insert_header(&backend, 4, block2_1_0, changes2_1_1.clone(), Default::default()); - - let changes2_2_0 = vec![(b"k5".to_vec(), b"v5".to_vec())]; - let changes2_2_1 = vec![(b"k6".to_vec(), b"v6".to_vec())]; - let 
block2_2_0 = insert_header(&backend, 3, block2, changes2_2_0.clone(), Default::default()); - let block2_2_1 = insert_header(&backend, 4, block2_2_0, changes2_2_1.clone(), Default::default()); - - // finalize block1 - backend.changes_tries_storage.meta.write().finalized_number = 1; - - // branch1: when asking for finalized block hash - let (changes1_root, _) = prepare_changes(changes1); - let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 1), Ok(Some(changes1_root))); - - // branch2: when asking for finalized block hash - let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_2_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 1), Ok(Some(changes1_root))); - - // branch1: when asking for non-finalized block hash (search by traversal) - let (changes2_1_0_root, _) = prepare_changes(changes2_1_0); - let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_1_0_root))); - - // branch2: when asking for non-finalized block hash (search using canonicalized hint) - let (changes2_2_0_root, _) = prepare_changes(changes2_2_0); - let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_2_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root))); - - // finalize first block of branch2 (block2_2_0) - backend.changes_tries_storage.meta.write().finalized_number = 3; - - // branch2: when asking for finalized block of this branch - assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root))); - - // branch1: when asking for finalized block of other branch - // => result is incorrect (returned for the block of branch1), but this is expected, - // because the other fork is abandoned (forked before finalized header) - let anchor = 
sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root))); - } - - #[test] - fn changes_tries_with_digest_are_pruned_on_finalization() { - let mut backend = Backend::::new_test(1000, 100); - backend.changes_tries_storage.min_blocks_to_keep = Some(8); - let config = ChangesTrieConfiguration { - digest_interval: 2, - digest_levels: 2, - }; - - // insert some blocks - let block0 = insert_header(&backend, 0, Default::default(), vec![(b"key_at_0".to_vec(), b"val_at_0".to_vec())], Default::default()); - let block1 = insert_header(&backend, 1, block0, vec![(b"key_at_1".to_vec(), b"val_at_1".to_vec())], Default::default()); - let block2 = insert_header(&backend, 2, block1, vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())], Default::default()); - let block3 = insert_header(&backend, 3, block2, vec![(b"key_at_3".to_vec(), b"val_at_3".to_vec())], Default::default()); - let block4 = insert_header(&backend, 4, block3, vec![(b"key_at_4".to_vec(), b"val_at_4".to_vec())], Default::default()); - let block5 = insert_header(&backend, 5, block4, vec![(b"key_at_5".to_vec(), b"val_at_5".to_vec())], Default::default()); - let block6 = insert_header(&backend, 6, block5, vec![(b"key_at_6".to_vec(), b"val_at_6".to_vec())], Default::default()); - let block7 = insert_header(&backend, 7, block6, vec![(b"key_at_7".to_vec(), b"val_at_7".to_vec())], Default::default()); - let block8 = insert_header(&backend, 8, block7, vec![(b"key_at_8".to_vec(), b"val_at_8".to_vec())], Default::default()); - let block9 = insert_header(&backend, 9, block8, vec![(b"key_at_9".to_vec(), b"val_at_9".to_vec())], Default::default()); - let block10 = insert_header(&backend, 10, block9, vec![(b"key_at_10".to_vec(), b"val_at_10".to_vec())], Default::default()); - let block11 = insert_header(&backend, 11, block10, vec![(b"key_at_11".to_vec(), b"val_at_11".to_vec())], Default::default()); - let block12 = 
insert_header(&backend, 12, block11, vec![(b"key_at_12".to_vec(), b"val_at_12".to_vec())], Default::default()); - let block13 = insert_header(&backend, 13, block12, vec![(b"key_at_13".to_vec(), b"val_at_13".to_vec())], Default::default()); - backend.changes_tries_storage.meta.write().finalized_number = 13; - - // check that roots of all tries are in the columns::CHANGES_TRIE - let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block13, number: 13 }; - fn read_changes_trie_root(backend: &Backend, num: u64) -> H256 { - backend.blockchain().header(BlockId::Number(num)).unwrap().unwrap().digest().logs().iter() - .find(|i| i.as_changes_trie_root().is_some()).unwrap().as_changes_trie_root().unwrap().clone() - } - let root1 = read_changes_trie_root(&backend, 1); assert_eq!(backend.changes_tries_storage.root(&anchor, 1).unwrap(), Some(root1)); - let root2 = read_changes_trie_root(&backend, 2); assert_eq!(backend.changes_tries_storage.root(&anchor, 2).unwrap(), Some(root2)); - let root3 = read_changes_trie_root(&backend, 3); assert_eq!(backend.changes_tries_storage.root(&anchor, 3).unwrap(), Some(root3)); - let root4 = read_changes_trie_root(&backend, 4); assert_eq!(backend.changes_tries_storage.root(&anchor, 4).unwrap(), Some(root4)); - let root5 = read_changes_trie_root(&backend, 5); assert_eq!(backend.changes_tries_storage.root(&anchor, 5).unwrap(), Some(root5)); - let root6 = read_changes_trie_root(&backend, 6); assert_eq!(backend.changes_tries_storage.root(&anchor, 6).unwrap(), Some(root6)); - let root7 = read_changes_trie_root(&backend, 7); assert_eq!(backend.changes_tries_storage.root(&anchor, 7).unwrap(), Some(root7)); - let root8 = read_changes_trie_root(&backend, 8); assert_eq!(backend.changes_tries_storage.root(&anchor, 8).unwrap(), Some(root8)); - let root9 = read_changes_trie_root(&backend, 9); assert_eq!(backend.changes_tries_storage.root(&anchor, 9).unwrap(), Some(root9)); - let root10 = read_changes_trie_root(&backend, 10); 
assert_eq!(backend.changes_tries_storage.root(&anchor, 10).unwrap(), Some(root10)); - let root11 = read_changes_trie_root(&backend, 11); assert_eq!(backend.changes_tries_storage.root(&anchor, 11).unwrap(), Some(root11)); - let root12 = read_changes_trie_root(&backend, 12); assert_eq!(backend.changes_tries_storage.root(&anchor, 12).unwrap(), Some(root12)); - - // now simulate finalization of block#12, causing prune of tries at #1..#4 - let mut tx = DBTransaction::new(); - backend.changes_tries_storage.prune(&config, &mut tx, Default::default(), 12); - backend.storage.db.write(tx).unwrap(); - assert!(backend.changes_tries_storage.get(&root1, EMPTY_PREFIX).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root2, EMPTY_PREFIX).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root3, EMPTY_PREFIX).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root4, EMPTY_PREFIX).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root5, EMPTY_PREFIX).unwrap().is_some()); - assert!(backend.changes_tries_storage.get(&root6, EMPTY_PREFIX).unwrap().is_some()); - assert!(backend.changes_tries_storage.get(&root7, EMPTY_PREFIX).unwrap().is_some()); - assert!(backend.changes_tries_storage.get(&root8, EMPTY_PREFIX).unwrap().is_some()); - - // now simulate finalization of block#16, causing prune of tries at #5..#8 - let mut tx = DBTransaction::new(); - backend.changes_tries_storage.prune(&config, &mut tx, Default::default(), 16); - backend.storage.db.write(tx).unwrap(); - assert!(backend.changes_tries_storage.get(&root5, EMPTY_PREFIX).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root6, EMPTY_PREFIX).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root7, EMPTY_PREFIX).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root8, EMPTY_PREFIX).unwrap().is_none()); - - // now "change" pruning mode to archive && simulate finalization of block#20 - // => no changes tries are pruned, 
because we never prune in archive mode - backend.changes_tries_storage.min_blocks_to_keep = None; - let mut tx = DBTransaction::new(); - backend.changes_tries_storage.prune(&config, &mut tx, Default::default(), 20); - backend.storage.db.write(tx).unwrap(); - assert!(backend.changes_tries_storage.get(&root9, EMPTY_PREFIX).unwrap().is_some()); - assert!(backend.changes_tries_storage.get(&root10, EMPTY_PREFIX).unwrap().is_some()); - assert!(backend.changes_tries_storage.get(&root11, EMPTY_PREFIX).unwrap().is_some()); - assert!(backend.changes_tries_storage.get(&root12, EMPTY_PREFIX).unwrap().is_some()); - } - - #[test] - fn changes_tries_without_digest_are_pruned_on_finalization() { - let mut backend = Backend::::new_test(1000, 100); - backend.changes_tries_storage.min_blocks_to_keep = Some(4); - let config = ChangesTrieConfiguration { - digest_interval: 0, - digest_levels: 0, - }; - - // insert some blocks - let block0 = insert_header(&backend, 0, Default::default(), vec![(b"key_at_0".to_vec(), b"val_at_0".to_vec())], Default::default()); - let block1 = insert_header(&backend, 1, block0, vec![(b"key_at_1".to_vec(), b"val_at_1".to_vec())], Default::default()); - let block2 = insert_header(&backend, 2, block1, vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())], Default::default()); - let block3 = insert_header(&backend, 3, block2, vec![(b"key_at_3".to_vec(), b"val_at_3".to_vec())], Default::default()); - let block4 = insert_header(&backend, 4, block3, vec![(b"key_at_4".to_vec(), b"val_at_4".to_vec())], Default::default()); - let block5 = insert_header(&backend, 5, block4, vec![(b"key_at_5".to_vec(), b"val_at_5".to_vec())], Default::default()); - let block6 = insert_header(&backend, 6, block5, vec![(b"key_at_6".to_vec(), b"val_at_6".to_vec())], Default::default()); - - // check that roots of all tries are in the columns::CHANGES_TRIE - let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block6, number: 6 }; - fn read_changes_trie_root(backend: &Backend, num: 
u64) -> H256 { - backend.blockchain().header(BlockId::Number(num)).unwrap().unwrap().digest().logs().iter() - .find(|i| i.as_changes_trie_root().is_some()).unwrap().as_changes_trie_root().unwrap().clone() - } - - let root1 = read_changes_trie_root(&backend, 1); assert_eq!(backend.changes_tries_storage.root(&anchor, 1).unwrap(), Some(root1)); - let root2 = read_changes_trie_root(&backend, 2); assert_eq!(backend.changes_tries_storage.root(&anchor, 2).unwrap(), Some(root2)); - let root3 = read_changes_trie_root(&backend, 3); assert_eq!(backend.changes_tries_storage.root(&anchor, 3).unwrap(), Some(root3)); - let root4 = read_changes_trie_root(&backend, 4); assert_eq!(backend.changes_tries_storage.root(&anchor, 4).unwrap(), Some(root4)); - let root5 = read_changes_trie_root(&backend, 5); assert_eq!(backend.changes_tries_storage.root(&anchor, 5).unwrap(), Some(root5)); - let root6 = read_changes_trie_root(&backend, 6); assert_eq!(backend.changes_tries_storage.root(&anchor, 6).unwrap(), Some(root6)); - - // now simulate finalization of block#5, causing prune of trie at #1 - let mut tx = DBTransaction::new(); - backend.changes_tries_storage.prune(&config, &mut tx, block5, 5); - backend.storage.db.write(tx).unwrap(); - assert!(backend.changes_tries_storage.get(&root1, EMPTY_PREFIX).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root2, EMPTY_PREFIX).unwrap().is_some()); - - // now simulate finalization of block#6, causing prune of tries at #2 - let mut tx = DBTransaction::new(); - backend.changes_tries_storage.prune(&config, &mut tx, block6, 6); - backend.storage.db.write(tx).unwrap(); - assert!(backend.changes_tries_storage.get(&root2, EMPTY_PREFIX).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root3, EMPTY_PREFIX).unwrap().is_some()); - } - #[test] fn tree_route_works() { let backend = Backend::::new_test(1000, 100); let blockchain = backend.blockchain(); - let block0 = insert_header(&backend, 0, Default::default(), Vec::new(), 
Default::default()); + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); // fork from genesis: 3 prong. - let a1 = insert_header(&backend, 1, block0, Vec::new(), Default::default()); - let a2 = insert_header(&backend, 2, a1, Vec::new(), Default::default()); - let a3 = insert_header(&backend, 3, a2, Vec::new(), Default::default()); + let a1 = insert_header(&backend, 1, block0, None, Default::default()); + let a2 = insert_header(&backend, 2, a1, None, Default::default()); + let a3 = insert_header(&backend, 3, a2, None, Default::default()); // fork from genesis: 2 prong. - let b1 = insert_header(&backend, 1, block0, Vec::new(), H256::from([1; 32])); - let b2 = insert_header(&backend, 2, b1, Vec::new(), Default::default()); + let b1 = insert_header(&backend, 1, block0, None, H256::from([1; 32])); + let b2 = insert_header(&backend, 2, b1, None, Default::default()); { let tree_route = tree_route(blockchain, a3, b2).unwrap(); @@ -2397,8 +2062,8 @@ mod tests { let backend = Backend::::new_test(1000, 100); let blockchain = backend.blockchain(); - let block0 = insert_header(&backend, 0, Default::default(), Vec::new(), Default::default()); - let block1 = insert_header(&backend, 1, block0, Vec::new(), Default::default()); + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); + let block1 = insert_header(&backend, 1, block0, None, Default::default()); { let tree_route = tree_route(blockchain, block0, block1).unwrap(); @@ -2413,16 +2078,16 @@ mod tests { fn lowest_common_ancestor_works() { let backend = Backend::::new_test(1000, 100); let blockchain = backend.blockchain(); - let block0 = insert_header(&backend, 0, Default::default(), Vec::new(), Default::default()); + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); // fork from genesis: 3 prong. 
- let a1 = insert_header(&backend, 1, block0, Vec::new(), Default::default()); - let a2 = insert_header(&backend, 2, a1, Vec::new(), Default::default()); - let a3 = insert_header(&backend, 3, a2, Vec::new(), Default::default()); + let a1 = insert_header(&backend, 1, block0, None, Default::default()); + let a2 = insert_header(&backend, 2, a1, None, Default::default()); + let a3 = insert_header(&backend, 3, a2, None, Default::default()); // fork from genesis: 2 prong. - let b1 = insert_header(&backend, 1, block0, Vec::new(), H256::from([1; 32])); - let b2 = insert_header(&backend, 2, b1, Vec::new(), Default::default()); + let b1 = insert_header(&backend, 1, block0, None, H256::from([1; 32])); + let b2 = insert_header(&backend, 2, b1, None, Default::default()); { let lca = lowest_common_ancestor(blockchain, a3, b2).unwrap(); @@ -2479,14 +2144,14 @@ mod tests { let backend = Backend::::new_test(10000, 10000); let blockchain = backend.blockchain(); - let genesis = insert_header(&backend, 0, Default::default(), Vec::new(), Default::default()); + let genesis = insert_header(&backend, 0, Default::default(), None, Default::default()); let block100 = (1..=100).fold(genesis, |parent, n| { - insert_header(&backend, n, parent, Vec::new(), Default::default()) + insert_header(&backend, n, parent, None, Default::default()) }); let block7000 = (101..=7000).fold(block100, |parent, n| { - insert_header(&backend, n, parent, Vec::new(), Default::default()) + insert_header(&backend, n, parent, None, Default::default()) }); // This will cause the ancestor of `block100` to be set to `genesis` as a side-effect. 
@@ -2522,17 +2187,17 @@ mod tests { #[test] fn test_leaves_pruned_on_finality() { let backend: Backend = Backend::new_test(10, 10); - let block0 = insert_header(&backend, 0, Default::default(), Default::default(), Default::default()); + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - let block1_a = insert_header(&backend, 1, block0, Default::default(), Default::default()); - let block1_b = insert_header(&backend, 1, block0, Default::default(), [1; 32].into()); - let block1_c = insert_header(&backend, 1, block0, Default::default(), [2; 32].into()); + let block1_a = insert_header(&backend, 1, block0, None, Default::default()); + let block1_b = insert_header(&backend, 1, block0, None, [1; 32].into()); + let block1_c = insert_header(&backend, 1, block0, None, [2; 32].into()); assert_eq!(backend.blockchain().leaves().unwrap(), vec![block1_a, block1_b, block1_c]); - let block2_a = insert_header(&backend, 2, block1_a, Default::default(), Default::default()); - let block2_b = insert_header(&backend, 2, block1_b, Default::default(), Default::default()); - let block2_c = insert_header(&backend, 2, block1_b, Default::default(), [1; 32].into()); + let block2_a = insert_header(&backend, 2, block1_a, None, Default::default()); + let block2_b = insert_header(&backend, 2, block1_b, None, Default::default()); + let block2_c = insert_header(&backend, 2, block1_b, None, [1; 32].into()); assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a, block2_b, block2_c, block1_c]); @@ -2559,8 +2224,8 @@ mod tests { let backend = Backend::::new_test(10, 10); - let block0 = insert_header(&backend, 0, Default::default(), Default::default(), Default::default()); - let _ = insert_header(&backend, 1, block0, Default::default(), Default::default()); + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); + let _ = insert_header(&backend, 1, block0, None, Default::default()); let justification = Some(vec![1, 2, 3]); 
backend.finalize_block(BlockId::Number(1), justification.clone()).unwrap(); @@ -2575,9 +2240,11 @@ mod tests { fn test_finalize_multiple_blocks_in_single_op() { let backend = Backend::::new_test(10, 10); - let block0 = insert_header(&backend, 0, Default::default(), Default::default(), Default::default()); - let block1 = insert_header(&backend, 1, block0, Default::default(), Default::default()); - let block2 = insert_header(&backend, 2, block1, Default::default(), Default::default()); + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); + let block1 = insert_header(&backend, 1, block0, None, Default::default()); + let block2 = insert_header(&backend, 2, block1, None, Default::default()); + let block3 = insert_header(&backend, 3, block2, None, Default::default()); + let block4 = insert_header(&backend, 4, block3, None, Default::default()); { let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, BlockId::Hash(block0)).unwrap(); @@ -2585,15 +2252,22 @@ mod tests { op.mark_finalized(BlockId::Hash(block2), None).unwrap(); backend.commit_operation(op).unwrap(); } + { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(block2)).unwrap(); + op.mark_finalized(BlockId::Hash(block3), None).unwrap(); + op.mark_finalized(BlockId::Hash(block4), None).unwrap(); + backend.commit_operation(op).unwrap(); + } } #[test] fn test_finalize_non_sequential() { let backend = Backend::::new_test(10, 10); - let block0 = insert_header(&backend, 0, Default::default(), Default::default(), Default::default()); - let block1 = insert_header(&backend, 1, block0, Default::default(), Default::default()); - let block2 = insert_header(&backend, 2, block1, Default::default(), Default::default()); + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); + let block1 = insert_header(&backend, 1, block0, None, Default::default()); + let block2 = 
insert_header(&backend, 2, block1, None, Default::default()); { let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, BlockId::Hash(block0)).unwrap(); diff --git a/client/db/src/light.rs b/client/db/src/light.rs index 8277f20b8d8ea..e663fc5699490 100644 --- a/client/db/src/light.rs +++ b/client/db/src/light.rs @@ -38,7 +38,7 @@ use codec::{Decode, Encode}; use sp_runtime::generic::{DigestItem, BlockId}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Zero, One, NumberFor, HasherFor}; use crate::cache::{DbCacheSync, DbCache, ComplexBlockId, EntryType as CacheEntryType}; -use crate::utils::{self, meta_keys, Meta, db_err, read_db, block_id_to_lookup_key, read_meta}; +use crate::utils::{self, meta_keys, DatabaseType, Meta, db_err, read_db, block_id_to_lookup_key, read_meta}; use crate::{DatabaseSettings, FrozenForDuration}; use log::{trace, warn, debug}; @@ -68,13 +68,10 @@ pub struct LightStorage { io_stats: FrozenForDuration, } -impl LightStorage - where - Block: BlockT, -{ +impl LightStorage { /// Create new storage with given settings. 
pub fn new(config: DatabaseSettings) -> ClientResult { - let db = crate::utils::open_database(&config, columns::META, "light")?; + let db = crate::utils::open_database::(&config, DatabaseType::Light)?; Self::from_kvdb(db as Arc<_>) } @@ -89,7 +86,7 @@ impl LightStorage } fn from_kvdb(db: Arc) -> ClientResult { - let meta = read_meta::(&*db, columns::META, columns::HEADER)?; + let meta = read_meta::(&*db, columns::HEADER)?; let cache = DbCache::new( db.clone(), columns::KEY_LOOKUP, @@ -417,7 +414,7 @@ impl LightBlockchainStorage for LightStorage fn import_header( &self, header: Block::Header, - cache_at: HashMap>, + mut cache_at: HashMap>, leaf_state: NewBlockState, aux_ops: Vec<(Vec, Option>)>, ) -> ClientResult<()> { @@ -475,6 +472,13 @@ impl LightBlockchainStorage for LightStorage )?; } + // update changes trie configuration cache + if !cache_at.contains_key(&well_known_cache_keys::CHANGES_TRIE_CONFIG) { + if let Some(new_configuration) = crate::changes_tries_storage::extract_new_configuration(&header) { + cache_at.insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_configuration.encode()); + } + } + { let mut cache = self.cache.0.write(); let cache_ops = cache.transaction(&mut transaction) @@ -487,8 +491,11 @@ impl LightBlockchainStorage for LightStorage .into_ops(); debug!("Light DB Commit {:?} ({})", hash, number); + self.db.write(transaction).map_err(db_err)?; - cache.commit(cache_ops); + cache.commit(cache_ops) + .expect("only fails if cache with given name isn't loaded yet;\ + cache is already loaded because there are cache_ops; qed"); } self.update_meta(hash, number, leaf_state.is_best(), finalized); @@ -543,7 +550,9 @@ impl LightBlockchainStorage for LightStorage .into_ops(); self.db.write(transaction).map_err(db_err)?; - cache.commit(cache_ops); + cache.commit(cache_ops) + .expect("only fails if cache with given name isn't loaded yet;\ + cache is already loaded because there are cache_ops; qed"); } self.update_meta(hash, header.number().clone(), 
false, true); @@ -603,7 +612,8 @@ fn cht_key>(cht_type: u8, block: N) -> ClientResult<[u8; 5]> { #[cfg(test)] pub(crate) mod tests { use sc_client::cht; - use sp_runtime::generic::DigestItem; + use sp_core::ChangesTrieConfiguration; + use sp_runtime::generic::{DigestItem, ChangesTrieSignal}; use sp_runtime::testing::{H256 as Hash, Header, Block as RawBlock, ExtrinsicWrapper}; use sp_blockchain::{lowest_common_ancestor, tree_route}; use super::*; @@ -964,7 +974,7 @@ pub(crate) mod tests { } fn get_authorities(cache: &dyn BlockchainCache, at: BlockId) -> Option> { - cache.get_at(&well_known_cache_keys::AUTHORITIES, &at) + cache.get_at(&well_known_cache_keys::AUTHORITIES, &at).unwrap_or(None) .and_then(|(_, _, val)| Decode::decode(&mut &val[..]).ok()) } @@ -1148,8 +1158,8 @@ pub(crate) mod tests { let (genesis_hash, storage) = { let db = LightStorage::::new_test(); - // before cache is initialized => None - assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)), None); + // before cache is initialized => Err + assert!(db.cache().get_at(b"test", &BlockId::Number(0)).is_err()); // insert genesis block (no value for cache is provided) let mut genesis_hash = None; @@ -1160,14 +1170,14 @@ pub(crate) mod tests { }); // after genesis is inserted => None - assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)), None); + assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)).unwrap(), None); // initialize cache db.cache().initialize(b"test", vec![42]).unwrap(); // after genesis is inserted + cache is initialized => Some assert_eq!( - db.cache().get_at(b"test", &BlockId::Number(0)), + db.cache().get_at(b"test", &BlockId::Number(0)).unwrap(), Some(((0, genesis_hash.unwrap()), None, vec![42])), ); @@ -1177,8 +1187,37 @@ pub(crate) mod tests { // restart && check that after restart value is read from the cache let db = LightStorage::::from_kvdb(storage as Arc<_>).expect("failed to create test-db"); assert_eq!( - db.cache().get_at(b"test", &BlockId::Number(0)), + 
db.cache().get_at(b"test", &BlockId::Number(0)).unwrap(), Some(((0, genesis_hash.unwrap()), None, vec![42])), ); } + + #[test] + fn changes_trie_configuration_is_tracked_on_light_client() { + let db = LightStorage::::new_test(); + + let new_config = Some(ChangesTrieConfiguration::new(2, 2)); + + // insert block#0 && block#1 (no value for cache is provided) + let hash0 = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); + assert_eq!( + db.cache().get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, &BlockId::Number(0)).unwrap() + .map(|(_, _, v)| ChangesTrieConfiguration::decode(&mut &v[..]).unwrap()), + None, + ); + + // insert configuration at block#1 (starts from block#2) + insert_block(&db, HashMap::new(), || { + let mut header = default_header(&hash0, 1); + header.digest_mut().push( + DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(new_config.clone())) + ); + header + }); + assert_eq!( + db.cache().get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, &BlockId::Number(1)).unwrap() + .map(|(_, _, v)| Option::::decode(&mut &v[..]).unwrap()), + Some(new_config), + ); + } } diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 4f7e906a0ae28..fd85a899b628e 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -24,17 +24,17 @@ use hash_db::Hasher; use sp_runtime::traits::{Block as BlockT, Header, HasherFor, NumberFor}; use sp_core::hexdisplay::HexDisplay; use sp_core::storage::ChildInfo; -use sp_state_machine::{backend::Backend as StateBackend, TrieBackend}; +use sp_state_machine::{ + backend::Backend as StateBackend, TrieBackend, StorageKey, StorageValue, + StorageCollection, ChildStorageCollection, +}; use log::trace; -use sc_client_api::backend::{StorageCollection, ChildStorageCollection}; use std::hash::Hash as StdHash; use crate::stats::StateUsageStats; const STATE_CACHE_BLOCKS: usize = 12; -type StorageKey = Vec; type ChildStorageKey = (Vec, Vec); -type 
StorageValue = Vec; /// Shared canonical state cache. pub struct Cache { diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs new file mode 100644 index 0000000000000..ab2d4bbf799b2 --- /dev/null +++ b/client/db/src/upgrade.rs @@ -0,0 +1,198 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Database upgrade logic. + +use std::fs; +use std::io::{Read, Write, ErrorKind}; +use std::path::{Path, PathBuf}; +use std::sync::Arc; + +use codec::Encode; +use kvdb_rocksdb::{Database, DatabaseConfig}; +use parking_lot::RwLock; +use sp_blockchain::{well_known_cache_keys, Cache}; +use sp_core::ChangesTrieConfiguration; +use sp_runtime::traits::Block as BlockT; +use crate::{ + cache::{ComplexBlockId, DbCache, DbCacheSync}, + utils::{DatabaseType, check_database_type, db_err, read_genesis_hash}, +}; + +/// Version file name. +const VERSION_FILE_NAME: &'static str = "db_version"; + +/// Current db version. +const CURRENT_VERSION: u32 = 1; + +/// Number of columns in v0. +const V0_NUM_COLUMNS: u32 = 10; + +/// Upgrade database to current version. 
+pub fn upgrade_db(db_path: &Path, db_type: DatabaseType) -> sp_blockchain::Result<()> { + let db_version = current_version(db_path)?; + match db_version { + 0 => migrate_0_to_1::(db_path, db_type)?, + 1 => (), + _ => Err(sp_blockchain::Error::Backend(format!("Future database version: {}", db_version)))?, + } + + update_version(db_path) +} + +/// Migration from version0 to version1: +/// 1) the number of columns has changed from 10 to 11; +/// 2) changes tries configuration are now cached. +fn migrate_0_to_1(db_path: &Path, db_type: DatabaseType) -> sp_blockchain::Result<()> { + { + let db = open_database(db_path, db_type, V0_NUM_COLUMNS)?; + db.add_column().map_err(db_err)?; + db.flush().map_err(db_err)?; + } + + let db = open_database(db_path, db_type, V0_NUM_COLUMNS + 1)?; + + const V0_FULL_KEY_LOOKUP_COLUMN: u32 = 3; + const V0_FULL_HEADER_COLUMN: u32 = 4; + const V0_FULL_CACHE_COLUMN: u32 = 10; // that's the column we have just added + const V0_LIGHT_KEY_LOOKUP_COLUMN: u32 = 1; + const V0_LIGHT_HEADER_COLUMN: u32 = 2; + const V0_LIGHT_CACHE_COLUMN: u32 = 3; + + let (key_lookup_column, header_column, cache_column) = match db_type { + DatabaseType::Full => ( + V0_FULL_KEY_LOOKUP_COLUMN, + V0_FULL_HEADER_COLUMN, + V0_FULL_CACHE_COLUMN, + ), + DatabaseType::Light => ( + V0_LIGHT_KEY_LOOKUP_COLUMN, + V0_LIGHT_HEADER_COLUMN, + V0_LIGHT_CACHE_COLUMN, + ), + }; + + let genesis_hash: Option = read_genesis_hash(&db)?; + if let Some(genesis_hash) = genesis_hash { + let cache: DbCacheSync = DbCacheSync(RwLock::new(DbCache::new( + Arc::new(db), + key_lookup_column, + header_column, + cache_column, + genesis_hash, + ComplexBlockId::new(genesis_hash, 0.into()), + ))); + let changes_trie_config: Option = None; + cache.initialize(&well_known_cache_keys::CHANGES_TRIE_CONFIG, changes_trie_config.encode())?; + } + + Ok(()) +} + +/// Reads current database version from the file at given path. +/// If the file does not exist returns 0. 
+fn current_version(path: &Path) -> sp_blockchain::Result { + let unknown_version_err = || sp_blockchain::Error::Backend("Unknown database version".into()); + + match fs::File::open(version_file_path(path)) { + Err(ref err) if err.kind() == ErrorKind::NotFound => Ok(0), + Err(_) => Err(unknown_version_err()), + Ok(mut file) => { + let mut s = String::new(); + file.read_to_string(&mut s).map_err(|_| unknown_version_err())?; + u32::from_str_radix(&s, 10).map_err(|_| unknown_version_err()) + }, + } +} + +/// Opens database of given type with given number of columns. +fn open_database(db_path: &Path, db_type: DatabaseType, db_columns: u32) -> sp_blockchain::Result { + let db_path = db_path.to_str() + .ok_or_else(|| sp_blockchain::Error::Backend("Invalid database path".into()))?; + let db_cfg = DatabaseConfig::with_columns(db_columns); + let db = Database::open(&db_cfg, db_path).map_err(db_err)?; + check_database_type(&db, db_type)?; + Ok(db) +} + +/// Writes current database version to the file. +/// Creates a new file if the version file does not exist yet. +fn update_version(path: &Path) -> sp_blockchain::Result<()> { + fs::create_dir_all(path).map_err(db_err)?; + let mut file = fs::File::create(version_file_path(path)).map_err(db_err)?; + file.write_all(format!("{}", CURRENT_VERSION).as_bytes()).map_err(db_err)?; + Ok(()) +} + +/// Returns the version file path. 
+fn version_file_path(path: &Path) -> PathBuf { + let mut file_path = path.to_owned(); + file_path.push(VERSION_FILE_NAME); + file_path +} + +#[cfg(test)] +mod tests { + use sc_state_db::PruningMode; + use crate::{DatabaseSettings, DatabaseSettingsSrc}; + use crate::tests::Block; + use super::*; + + fn create_db(db_path: &Path, version: Option) { + let db_cfg = DatabaseConfig::with_columns(V0_NUM_COLUMNS); + Database::open(&db_cfg, db_path.to_str().unwrap()).unwrap(); + if let Some(version) = version { + fs::create_dir_all(db_path).unwrap(); + let mut file = fs::File::create(version_file_path(db_path)).unwrap(); + file.write_all(format!("{}", version).as_bytes()).unwrap(); + } + } + + fn open_database(db_path: &Path) -> sp_blockchain::Result<()> { + crate::utils::open_database::(&DatabaseSettings { + state_cache_size: 0, + state_cache_child_ratio: None, + pruning: PruningMode::ArchiveAll, + source: DatabaseSettingsSrc::Path { path: db_path.to_owned(), cache_size: None }, + }, DatabaseType::Full).map(|_| ()) + } + + #[test] + fn downgrade_never_happens() { + let db_dir = tempdir::TempDir::new("").unwrap(); + create_db(db_dir.path(), Some(CURRENT_VERSION + 1)); + assert!(open_database(db_dir.path()).is_err()); + } + + #[test] + fn open_empty_database_works() { + let db_dir = tempdir::TempDir::new("").unwrap(); + open_database(db_dir.path()).unwrap(); + open_database(db_dir.path()).unwrap(); + assert_eq!(current_version(db_dir.path()).unwrap(), CURRENT_VERSION); + } + + #[test] + fn upgrade_from_0_to_1_works() { + for version_from_file in &[None, Some(0)] { + let db_dir = tempdir::TempDir::new("").unwrap(); + let db_path = db_dir.path(); + create_db(db_path, *version_from_file); + open_database(db_path).unwrap(); + assert_eq!(current_version(db_path).unwrap(), CURRENT_VERSION); + } + } +} diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index ed7b2220aacd5..f7f51d7f6de5d 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -21,7 +21,7 @@ use 
std::sync::Arc; use std::{io, convert::TryInto}; use kvdb::{KeyValueDB, DBTransaction}; -#[cfg(feature = "kvdb-rocksdb")] +#[cfg(any(feature = "kvdb-rocksdb", test))] use kvdb_rocksdb::{Database, DatabaseConfig}; use log::debug; @@ -36,7 +36,7 @@ use crate::{DatabaseSettings, DatabaseSettingsSrc}; /// Number of columns in the db. Must be the same for both full && light dbs. /// Otherwise RocksDb will fail to open database && check its type. -pub const NUM_COLUMNS: u32 = 10; +pub const NUM_COLUMNS: u32 = 11; /// Meta column. The set of keys in the column is shared by full && light storages. pub const COLUMN_META: u32 = 0; @@ -50,6 +50,8 @@ pub mod meta_keys { pub const FINALIZED_BLOCK: &[u8; 5] = b"final"; /// Meta information prefix for list-based caches. pub const CACHE_META_PREFIX: &[u8; 5] = b"cache"; + /// Meta information for changes tries key. + pub const CHANGES_TRIES_META: &[u8; 5] = b"ctrie"; /// Genesis block hash. pub const GENESIS_HASH: &[u8; 3] = b"gen"; /// Leaves prefix list key. @@ -76,6 +78,15 @@ pub struct Meta { /// A block lookup key: used for canonical lookup from block number to hash pub type NumberIndexKey = [u8; 4]; +/// Database type. +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum DatabaseType { + /// Full node database. + Full, + /// Light node database. + Light, +} + /// Convert block number into short lookup key (LE representation) for /// blocks that are in the canonical chain. /// @@ -203,14 +214,17 @@ pub fn db_err(err: io::Error) -> sp_blockchain::Error { } /// Open RocksDB database. 
-pub fn open_database( +pub fn open_database( config: &DatabaseSettings, - col_meta: u32, - db_type: &str + db_type: DatabaseType, ) -> sp_blockchain::Result> { let db: Arc = match &config.source { - #[cfg(feature = "kvdb-rocksdb")] + #[cfg(any(feature = "kvdb-rocksdb", test))] DatabaseSettingsSrc::Path { path, cache_size } => { + // first upgrade database to required version + crate::upgrade::upgrade_db::(&path, db_type)?; + + // and now open database assuming that it has the latest version let mut db_config = DatabaseConfig::with_columns(NUM_COLUMNS); if let Some(cache_size) = cache_size { @@ -232,7 +246,7 @@ pub fn open_database( .ok_or_else(|| sp_blockchain::Error::Backend("Invalid database path".into()))?; Arc::new(Database::open(&db_config, &path).map_err(db_err)?) }, - #[cfg(not(feature = "kvdb-rocksdb"))] + #[cfg(not(any(feature = "kvdb-rocksdb", test)))] DatabaseSettingsSrc::Path { .. } => { let msg = "Try to open RocksDB database with RocksDB disabled".into(); return Err(sp_blockchain::Error::Backend(msg)); @@ -240,22 +254,28 @@ pub fn open_database( DatabaseSettingsSrc::Custom(db) => db.clone(), }; - // check database type - match db.get(col_meta, meta_keys::TYPE).map_err(db_err)? { + check_database_type(&*db, db_type)?; + + Ok(db) +} + +/// Check database type. +pub fn check_database_type(db: &dyn KeyValueDB, db_type: DatabaseType) -> sp_blockchain::Result<()> { + match db.get(COLUMN_META, meta_keys::TYPE).map_err(db_err)? { Some(stored_type) => { - if db_type.as_bytes() != &*stored_type { + if db_type.as_str().as_bytes() != &*stored_type { return Err(sp_blockchain::Error::Backend( - format!("Unexpected database type. Expected: {}", db_type)).into()); + format!("Unexpected database type. 
Expected: {}", db_type.as_str())).into()); } }, None => { let mut transaction = DBTransaction::new(); - transaction.put(col_meta, meta_keys::TYPE, db_type.as_bytes()); + transaction.put(COLUMN_META, meta_keys::TYPE, db_type.as_str().as_bytes()); db.write(transaction).map_err(db_err)?; }, } - Ok(db) + Ok(()) } /// Read database column entry for the given block. @@ -304,20 +324,15 @@ pub fn require_header( } /// Read meta from the database. -pub fn read_meta(db: &dyn KeyValueDB, col_meta: u32, col_header: u32) -> Result< +pub fn read_meta(db: &dyn KeyValueDB, col_header: u32) -> Result< Meta<<::Header as HeaderT>::Number, Block::Hash>, sp_blockchain::Error, > where Block: BlockT, { - let genesis_hash: Block::Hash = match db.get(col_meta, meta_keys::GENESIS_HASH).map_err(db_err)? { - Some(h) => match Decode::decode(&mut &h[..]) { - Ok(h) => h, - Err(err) => return Err(sp_blockchain::Error::Backend( - format!("Error decoding genesis hash: {}", err) - )), - }, + let genesis_hash: Block::Hash = match read_genesis_hash(db)? { + Some(genesis_hash) => genesis_hash, None => return Ok(Meta { best_hash: Default::default(), best_number: Zero::zero(), @@ -328,7 +343,7 @@ pub fn read_meta(db: &dyn KeyValueDB, col_meta: u32, col_header: u32) -> }; let load_meta_block = |desc, key| -> Result<_, sp_blockchain::Error> { - if let Some(Some(header)) = db.get(col_meta, key).and_then(|id| + if let Some(Some(header)) = db.get(COLUMN_META, key).and_then(|id| match id { Some(id) => db.get(col_header, &id).map(|h| h.map(|b| Block::Header::decode(&mut &b[..]).ok())), None => Ok(None), @@ -354,6 +369,29 @@ pub fn read_meta(db: &dyn KeyValueDB, col_meta: u32, col_header: u32) -> }) } +/// Read genesis hash from database. +pub fn read_genesis_hash(db: &dyn KeyValueDB) -> sp_blockchain::Result> { + match db.get(COLUMN_META, meta_keys::GENESIS_HASH).map_err(db_err)? 
{ + Some(h) => match Decode::decode(&mut &h[..]) { + Ok(h) => Ok(Some(h)), + Err(err) => Err(sp_blockchain::Error::Backend( + format!("Error decoding genesis hash: {}", err) + )), + }, + None => Ok(None), + } +} + +impl DatabaseType { + /// Returns str representation of the type. + pub fn as_str(&self) -> &'static str { + match *self { + DatabaseType::Full => "full", + DatabaseType::Light => "light", + } + } +} + #[cfg(test)] mod tests { use super::*; @@ -368,4 +406,10 @@ mod tests { _ => unreachable!(), }; } + + #[test] + fn database_type_as_str_works() { + assert_eq!(DatabaseType::Full.as_str(), "full"); + assert_eq!(DatabaseType::Light.as_str(), "light"); + } } diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index a2f6ebaac0e3c..aa8c46999d98d 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-executor" -version = "2.0.0" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" @@ -18,10 +18,10 @@ parity-wasm = "0.41.0" lazy_static = "1.4.0" sp-wasm-interface = { version = "2.0.0", path = "../../primitives/wasm-interface" } sp-runtime-interface = { version = "2.0.0", path = "../../primitives/runtime-interface" } -sp-externalities = { version = "2.0.0", path = "../../primitives/externalities" } -sc-executor-common = { version = "2.0.0", path = "common" } -sc-executor-wasmi = { version = "2.0.0", path = "wasmi" } -sc-executor-wasmtime = { version = "2.0.0", path = "wasmtime", optional = true } +sp-externalities = { version = "0.8.0", path = "../../primitives/externalities" } +sc-executor-common = { version = "0.8", path = "common" } +sc-executor-wasmi = { version = "0.8", path = "wasmi" } +sc-executor-wasmtime = { version = "0.8", path = "wasmtime", optional = true } parking_lot = "0.9.0" log = "0.4.8" libsecp256k1 = "0.3.4" @@ -32,7 +32,7 @@ wabt = "0.9.2" hex-literal = "0.2.1" sc-runtime-test = { version = "2.0.0", path = "runtime-test" } substrate-test-runtime = { 
version = "2.0.0", path = "../../test-utils/runtime" } -sp-state-machine = { version = "2.0.0", path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.8", path = "../../primitives/state-machine" } test-case = "0.3.3" [features] diff --git a/client/executor/common/Cargo.toml b/client/executor/common/Cargo.toml index ddd40de62bbb1..cae0876217993 100644 --- a/client/executor/common/Cargo.toml +++ b/client/executor/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-executor-common" -version = "2.0.0" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" diff --git a/client/executor/common/src/error.rs b/client/executor/common/src/error.rs index fca17bd88c8ea..f7de0af9685d7 100644 --- a/client/executor/common/src/error.rs +++ b/client/executor/common/src/error.rs @@ -57,6 +57,10 @@ pub enum Error { /// Runtime failed. #[display(fmt="Runtime error")] Runtime, + /// Runtime panicked. + #[display(fmt="Runtime panicked: {}", _0)] + #[from(ignore)] + RuntimePanicked(String), /// Invalid memory reference. #[display(fmt="Invalid memory reference")] InvalidMemoryReference, diff --git a/client/executor/common/src/wasm_runtime.rs b/client/executor/common/src/wasm_runtime.rs index 78c05c4c29b68..0733350f4cd6a 100644 --- a/client/executor/common/src/wasm_runtime.rs +++ b/client/executor/common/src/wasm_runtime.rs @@ -17,7 +17,6 @@ //! Definitions for a wasm runtime. use crate::error::Error; -use sp_core::traits::Externalities; use sp_wasm_interface::Function; /// A trait that defines an abstract wasm runtime. @@ -34,6 +33,5 @@ pub trait WasmRuntime { fn host_functions(&self) -> &[&'static dyn Function]; /// Call a method in the Substrate runtime by name. Returns the encoded result on success. 
- fn call(&mut self, ext: &mut dyn Externalities, method: &str, data: &[u8]) - -> Result, Error>; + fn call(&mut self, method: &str, data: &[u8]) -> Result, Error>; } diff --git a/client/executor/runtime-test/Cargo.toml b/client/executor/runtime-test/Cargo.toml index ba88550a92ef7..835021fec2a11 100644 --- a/client/executor/runtime-test/Cargo.toml +++ b/client/executor/runtime-test/Cargo.toml @@ -8,7 +8,7 @@ build = "build.rs" [dependencies] sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } sp-io = { version = "2.0.0", default-features = false, path = "../../../primitives/io" } -sp-sandbox = { version = "2.0.0", default-features = false, path = "../../../primitives/sandbox" } +sp-sandbox = { version = "0.8.0", default-features = false, path = "../../../primitives/sandbox" } sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" } sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index 2d39cac414545..00f4eb33b02bf 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -83,7 +83,7 @@ fn call_in_wasm( code: &[u8], heap_pages: u64, ) -> crate::error::Result> { - crate::call_in_wasm::( + crate::call_in_wasm::( function, call_data, execution_method, diff --git a/client/executor/src/lib.rs b/client/executor/src/lib.rs index 1908eb3688ea3..4f97efa9b661e 100644 --- a/client/executor/src/lib.rs +++ b/client/executor/src/lib.rs @@ -39,7 +39,7 @@ mod wasm_runtime; mod integration_tests; pub use wasmi; -pub use native_executor::{with_native_environment, NativeExecutor, NativeExecutionDispatch}; +pub use native_executor::{with_externalities_safe, NativeExecutor, NativeExecutionDispatch}; pub use sp_version::{RuntimeVersion, NativeVersion}; pub use codec::Codec; #[doc(hidden)] @@ -57,26 +57,55 @@ 
pub use sc_executor_common::{error, allocator, sandbox}; /// - `call_data`: Will be given as input parameters to `function` /// - `execution_method`: The execution method to use. /// - `ext`: The externalities that should be set while executing the wasm function. +/// If `None` is given, no externalities will be set. /// - `heap_pages`: The number of heap pages to allocate. /// /// Returns the `Vec` that contains the return value of the function. -pub fn call_in_wasm( +pub fn call_in_wasm( function: &str, call_data: &[u8], execution_method: WasmExecutionMethod, - ext: &mut E, + ext: &mut dyn Externalities, code: &[u8], heap_pages: u64, allow_missing_imports: bool, ) -> error::Result> { - let mut instance = wasm_runtime::create_wasm_runtime_with_code( + call_in_wasm_with_host_functions( + function, + call_data, execution_method, - heap_pages, + ext, code, + heap_pages, HF::host_functions(), allow_missing_imports, + ) +} + +/// Non-generic version of [`call_in_wasm`] that takes the `host_functions` as parameter. +/// For more information please see [`call_in_wasm`]. +pub fn call_in_wasm_with_host_functions( + function: &str, + call_data: &[u8], + execution_method: WasmExecutionMethod, + ext: &mut dyn Externalities, + code: &[u8], + heap_pages: u64, + host_functions: Vec<&'static dyn sp_wasm_interface::Function>, + allow_missing_imports: bool, +) -> error::Result> { + let instance = wasm_runtime::create_wasm_runtime_with_code( + execution_method, + heap_pages, + code, + host_functions, + allow_missing_imports, )?; - instance.call(ext, function, call_data) + + // It is safe, as we delete the instance afterwards. + let mut instance = std::panic::AssertUnwindSafe(instance); + + with_externalities_safe(ext, move || instance.call(function, call_data)).and_then(|r| r) } /// Provides runtime information. 
@@ -98,7 +127,7 @@ mod tests { fn call_in_interpreted_wasm_works() { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let res = call_in_wasm::<_, sp_io::SubstrateHostFunctions>( + let res = call_in_wasm::( "test_empty_return", &[], WasmExecutionMethod::Interpreted, diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs index 86b274eea0bb9..4fe7a205f53f9 100644 --- a/client/executor/src/native_executor.rs +++ b/client/executor/src/native_executor.rs @@ -22,7 +22,7 @@ use sp_version::{NativeVersion, RuntimeVersion}; use codec::{Decode, Encode}; use sp_core::{NativeOrEncoded, traits::{CodeExecutor, Externalities}}; use log::trace; -use std::{result, cell::RefCell, panic::{UnwindSafe, AssertUnwindSafe}}; +use std::{result, cell::RefCell, panic::{UnwindSafe, AssertUnwindSafe}, sync::Arc}; use sp_wasm_interface::{HostFunctions, Function}; use sc_executor_common::wasm_runtime::WasmRuntime; @@ -33,22 +33,29 @@ thread_local! { /// Default num of pages for the heap const DEFAULT_HEAP_PAGES: u64 = 1024; -pub(crate) fn safe_call(f: F) -> Result - where F: UnwindSafe + FnOnce() -> U -{ - // Substrate uses custom panic hook that terminates process on panic. Disable - // termination for the native call. - let _guard = sp_panic_handler::AbortGuard::force_unwind(); - std::panic::catch_unwind(f).map_err(|_| Error::Runtime) -} - -/// Set up the externalities and safe calling environment to execute calls to a native runtime. +/// Set up the externalities and safe calling environment to execute runtime calls. /// /// If the inner closure panics, it will be caught and return an error. 
-pub fn with_native_environment(ext: &mut dyn Externalities, f: F) -> Result +pub fn with_externalities_safe(ext: &mut dyn Externalities, f: F) -> Result where F: UnwindSafe + FnOnce() -> U { - sp_externalities::set_and_run_with_externalities(ext, move || safe_call(f)) + sp_externalities::set_and_run_with_externalities( + ext, + move || { + // Substrate uses custom panic hook that terminates process on panic. Disable + // termination for the native call. + let _guard = sp_panic_handler::AbortGuard::force_unwind(); + std::panic::catch_unwind(f).map_err(|e| { + if let Some(err) = e.downcast_ref::() { + Error::RuntimePanicked(err.clone()) + } else if let Some(err) = e.downcast_ref::<&'static str>() { + Error::RuntimePanicked(err.to_string()) + } else { + Error::RuntimePanicked("Unknown panic".into()) + } + }) + }, + ) } /// Delegate for dispatching a CodeExecutor call. @@ -80,7 +87,7 @@ pub struct NativeExecutor { /// The number of 64KB pages to allocate for Wasm execution. default_heap_pages: u64, /// The host functions registered with this instance. 
- host_functions: Vec<&'static dyn Function>, + host_functions: Arc>, } impl NativeExecutor { @@ -107,7 +114,7 @@ impl NativeExecutor { fallback_method, native_version: D::native_version(), default_heap_pages: default_heap_pages.unwrap_or(DEFAULT_HEAP_PAGES), - host_functions, + host_functions: Arc::new(host_functions), } } @@ -139,7 +146,7 @@ impl NativeExecutor { ext, self.fallback_method, self.default_heap_pages, - &self.host_functions, + &*self.host_functions, )?; let runtime = AssertUnwindSafe(runtime); @@ -181,7 +188,7 @@ impl RuntimeInfo for NativeExecutor { } } -impl CodeExecutor for NativeExecutor { +impl CodeExecutor for NativeExecutor { type Error = Error; fn call @@ -212,13 +219,15 @@ impl CodeExecutor for NativeExecutor { onchain_version, ); - safe_call( - move || runtime.call(&mut **ext, method, data).map(NativeOrEncoded::Encoded) + with_externalities_safe( + &mut **ext, + move || runtime.call(method, data).map(NativeOrEncoded::Encoded) ) } (false, _, _) => { - safe_call( - move || runtime.call(&mut **ext, method, data).map(NativeOrEncoded::Encoded) + with_externalities_safe( + &mut **ext, + move || runtime.call(method, data).map(NativeOrEncoded::Encoded) ) }, (true, true, Some(call)) => { @@ -230,7 +239,7 @@ impl CodeExecutor for NativeExecutor { ); used_native = true; - let res = with_native_environment(&mut **ext, move || (call)()) + let res = with_externalities_safe(&mut **ext, move || (call)()) .and_then(|r| r .map(NativeOrEncoded::Native) .map_err(|s| Error::ApiError(s.to_string())) @@ -255,6 +264,27 @@ impl CodeExecutor for NativeExecutor { } } +impl sp_core::traits::CallInWasm for NativeExecutor { + fn call_in_wasm( + &self, + wasm_blob: &[u8], + method: &str, + call_data: &[u8], + ext: &mut dyn Externalities, + ) -> std::result::Result, String> { + crate::call_in_wasm_with_host_functions( + method, + call_data, + self.fallback_method, + ext, + wasm_blob, + self.default_heap_pages, + (*self.host_functions).clone(), + false, + ).map_err(|e| 
e.to_string()) + } +} + /// Implements a `NativeExecutionDispatch` for provided parameters. /// /// # Example @@ -318,7 +348,7 @@ macro_rules! native_executor_instance { method: &str, data: &[u8] ) -> $crate::error::Result> { - $crate::with_native_environment(ext, move || $dispatcher(method, data))? + $crate::with_externalities_safe(ext, move || $dispatcher(method, data))? .ok_or_else(|| $crate::error::Error::MethodNotFound(method.to_owned())) } diff --git a/client/executor/src/wasm_runtime.rs b/client/executor/src/wasm_runtime.rs index eef73097f6e7b..036b28f764003 100644 --- a/client/executor/src/wasm_runtime.rs +++ b/client/executor/src/wasm_runtime.rs @@ -223,8 +223,9 @@ fn create_versioned_wasm_runtime( // The following unwind safety assertion is OK because if the method call panics, the // runtime will be dropped. let mut runtime = AssertUnwindSafe(runtime.as_mut()); - crate::native_executor::safe_call( - move || runtime.call(&mut **ext, "Core_version", &[]) + crate::native_executor::with_externalities_safe( + &mut **ext, + move || runtime.call("Core_version", &[]) ).map_err(|_| WasmError::Instantiation("panic in call to get runtime version".into()))? 
}; let encoded_version = version_result diff --git a/client/executor/wasmi/Cargo.toml b/client/executor/wasmi/Cargo.toml index 7a13d7ea97e26..dbfdc505c647a 100644 --- a/client/executor/wasmi/Cargo.toml +++ b/client/executor/wasmi/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-executor-wasmi" -version = "2.0.0" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" @@ -9,8 +9,7 @@ log = "0.4.8" wasmi = "0.6.2" parity-wasm = "0.41.0" codec = { package = "parity-scale-codec", version = "1.0.0" } -sc-executor-common = { version = "2.0.0", path = "../common" } +sc-executor-common = { version = "0.8", path = "../common" } sp-wasm-interface = { version = "2.0.0", path = "../../../primitives/wasm-interface" } sp-runtime-interface = { version = "2.0.0", path = "../../../primitives/runtime-interface" } sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-externalities = { version = "2.0.0", path = "../../../primitives/externalities" } diff --git a/client/executor/wasmi/src/lib.rs b/client/executor/wasmi/src/lib.rs index 7c6141d6c835f..c10698fea46ad 100644 --- a/client/executor/wasmi/src/lib.rs +++ b/client/executor/wasmi/src/lib.rs @@ -27,7 +27,7 @@ use wasmi::{ memory_units::Pages, RuntimeValue::{I32, I64, self}, }; use codec::{Encode, Decode}; -use sp_core::{sandbox as sandbox_primitives, traits::Externalities}; +use sp_core::sandbox as sandbox_primitives; use log::{error, trace}; use parity_wasm::elements::{deserialize_buffer, DataSegment, Instruction, Module as RawModule}; use sp_wasm_interface::{ @@ -381,7 +381,6 @@ fn get_heap_base(module: &ModuleRef) -> Result { /// Call a given method in the given wasm-module runtime. 
fn call_in_wasm_module( - ext: &mut dyn Externalities, module_instance: &ModuleRef, method: &str, data: &[u8], @@ -410,13 +409,10 @@ fn call_in_wasm_module( let offset = fec.allocate_memory(data.len() as u32)?; fec.write_memory(offset, data)?; - let result = sp_externalities::set_and_run_with_externalities( - ext, - || module_instance.invoke_export( - method, - &[I32(u32::from(offset) as i32), I32(data.len() as i32)], - &mut fec, - ), + let result = module_instance.invoke_export( + method, + &[I32(u32::from(offset) as i32), I32(data.len() as i32)], + &mut fec, ); match result { @@ -599,7 +595,6 @@ impl WasmRuntime for WasmiRuntime { fn call( &mut self, - ext: &mut dyn Externalities, method: &str, data: &[u8], ) -> Result, Error> { @@ -612,7 +607,6 @@ impl WasmRuntime for WasmiRuntime { e })?; call_in_wasm_module( - ext, &self.instance, method, data, diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index 0d6bede016e66..44912086eaaed 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-executor-wasmtime" -version = "2.0.0" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" @@ -9,11 +9,10 @@ log = "0.4.8" wasmi = "0.6.2" parity-wasm = "0.41.0" codec = { package = "parity-scale-codec", version = "1.0.0" } -sc-executor-common = { version = "2.0.0", path = "../common" } +sc-executor-common = { version = "0.8", path = "../common" } sp-wasm-interface = { version = "2.0.0", path = "../../../primitives/wasm-interface" } sp-runtime-interface = { version = "2.0.0", path = "../../../primitives/runtime-interface" } sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-externalities = { version = "2.0.0", path = "../../../primitives/externalities" } cranelift-codegen = "0.50" cranelift-entity = "0.50" diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index ae4739d1f1d3f..f9b1b6209c3b7 
100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -24,7 +24,6 @@ use sc_executor_common::{ error::{Error, Result, WasmError}, wasm_runtime::WasmRuntime, }; -use sp_core::traits::Externalities; use sp_wasm_interface::{Pointer, WordSize, Function}; use sp_runtime_interface::unpack_ptr_and_len; @@ -70,11 +69,10 @@ impl WasmRuntime for WasmtimeRuntime { &self.host_functions } - fn call(&mut self, ext: &mut dyn Externalities, method: &str, data: &[u8]) -> Result> { + fn call(&mut self, method: &str, data: &[u8]) -> Result> { call_method( &mut self.context, &mut self.module, - ext, method, data, self.heap_pages, @@ -146,7 +144,6 @@ fn create_compiled_unit( fn call_method( context: &mut Context, module: &mut CompiledModule, - ext: &mut dyn Externalities, method: &str, data: &[u8], heap_pages: u32, @@ -176,11 +173,9 @@ fn call_method( let args = [RuntimeValue::I32(u32::from(data_ptr) as i32), RuntimeValue::I32(data_len as i32)]; // Invoke the function in the runtime. 
- let outcome = sp_externalities::set_and_run_with_externalities(ext, || { - context - .invoke(&mut instance, method, &args[..]) - .map_err(|e| Error::Other(format!("error calling runtime: {}", e))) - })?; + let outcome = context + .invoke(&mut instance, method, &args[..]) + .map_err(|e| Error::Other(format!("error calling runtime: {}", e)))?; let trap_error = reset_env_state_and_take_trap(context, None)?; let (output_ptr, output_len) = match outcome { ActionOutcome::Returned { values } => match values.as_slice() { diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index 9f90834e0df3c..2c0a10857c7f1 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-finality-grandpa" -version = "2.0.0" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" @@ -13,6 +13,7 @@ log = "0.4.8" parking_lot = "0.9.0" rand = "0.7.2" parity-scale-codec = { version = "1.0.0", features = ["derive"] } +sp-arithmetic = { version = "2.0.0", path = "../../primitives/arithmetic" } sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } sp-consensus = { version = "0.8", path = "../../primitives/consensus/common" } sp-core = { version = "2.0.0", path = "../../primitives/core" } @@ -20,23 +21,23 @@ sc-telemetry = { version = "2.0.0", path = "../telemetry" } sc-keystore = { version = "2.0.0", path = "../keystore" } serde_json = "1.0.41" sc-client-api = { version = "2.0.0", path = "../api" } -sc-client = { version = "2.0.0", path = "../" } +sc-client = { version = "0.8", path = "../" } sp-inherents = { version = "2.0.0", path = "../../primitives/inherents" } sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } sc-network = { version = "0.8", path = "../network" } -sc-network-gossip = { version = "2.0.0", path = "../network-gossip" } +sc-network-gossip = { version = "0.8", path = "../network-gossip" } sp-finality-tracker = { version = "2.0.0", 
path = "../../primitives/finality-tracker" } sp-finality-grandpa = { version = "2.0.0", path = "../../primitives/finality-grandpa" } -finality-grandpa = { version = "0.10.1", features = ["derive-codec"] } +finality-grandpa = { version = "0.10.3", features = ["derive-codec"] } [dev-dependencies] -finality-grandpa = { version = "0.10.1", features = ["derive-codec", "test-helpers"] } +finality-grandpa = { version = "0.10.3", features = ["derive-codec", "test-helpers"] } sc-network = { version = "0.8", path = "../network" } -sc-network-test = { version = "2.0.0", path = "../network/test" } +sc-network-test = { version = "0.8.0", path = "../network/test" } sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } sp-consensus-babe = { version = "0.8", path = "../../primitives/consensus/babe" } -sp-state-machine = { version = "2.0.0", path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.8", path = "../../primitives/state-machine" } env_logger = "0.7.0" tokio = "0.1.22" tempfile = "3.1.0" diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index 1135cc4f8674f..ec74393d80ff1 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -83,15 +83,15 @@ //! 
We only send polite messages to peers, use sp_runtime::traits::{NumberFor, Block as BlockT, Zero}; -use sc_network_gossip::{GossipEngine, MessageIntent, ValidatorContext}; +use sc_network_gossip::{MessageIntent, ValidatorContext}; use sc_network::{config::Roles, PeerId, ReputationChange}; use parity_scale_codec::{Encode, Decode}; use sp_finality_grandpa::AuthorityId; use sc_telemetry::{telemetry, CONSENSUS_DEBUG}; -use log::{trace, debug, warn}; +use log::{trace, debug}; use futures::prelude::*; -use futures::sync::mpsc; +use futures03::channel::mpsc; use rand::seq::SliceRandom; use crate::{environment, CatchUp, CompactCommit, SignedMessage}; @@ -1178,7 +1178,7 @@ impl GossipValidator { pub(super) fn new( config: crate::Config, set_state: environment::SharedVoterSetState, - ) -> (GossipValidator, ReportStream) { + ) -> (GossipValidator, mpsc::UnboundedReceiver) { let (tx, rx) = mpsc::unbounded(); let val = GossipValidator { inner: parking_lot::RwLock::new(Inner::new(config)), @@ -1186,7 +1186,7 @@ impl GossipValidator { report_sender: tx, }; - (val, ReportStream { reports: rx }) + (val, rx) } /// Note a round in the current set has started. @@ -1445,57 +1445,9 @@ impl sc_network_gossip::Validator for GossipValidator, -} - -impl ReportStream { - /// Consume the report stream, converting it into a future that - /// handles all reports. - pub(super) fn consume(self, net: GossipEngine) - -> impl Future + Send + 'static - where - B: BlockT, - { - ReportingTask { - reports: self.reports, - net, - } - } -} - -/// A future for reporting peers. 
-#[must_use = "Futures do nothing unless polled"] -struct ReportingTask { - reports: mpsc::UnboundedReceiver, - net: GossipEngine, -} - -impl Future for ReportingTask { - type Item = (); - type Error = (); - - fn poll(&mut self) -> Poll<(), ()> { - loop { - match self.reports.poll() { - Err(_) => { - warn!(target: "afg", "Report stream terminated unexpectedly"); - return Ok(Async::Ready(())) - } - Ok(Async::Ready(None)) => return Ok(Async::Ready(())), - Ok(Async::Ready(Some(PeerReport { who, cost_benefit }))) => - self.net.report(who, cost_benefit), - Ok(Async::NotReady) => return Ok(Async::NotReady), - } - } - } +pub(super) struct PeerReport { + pub who: PeerId, + pub cost_benefit: ReputationChange, } #[cfg(test)] diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index b65f340652542..7723047d1b423 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -27,13 +27,19 @@ //! In the future, there will be a fallback for allowing sending the same message //! under certain conditions that are used to un-stick the protocol. 
-use std::sync::Arc; +use futures::{prelude::*, sync::mpsc}; +use futures03::{ + channel::mpsc as mpsc03, + compat::Compat, + future::{Future as Future03}, + stream::StreamExt, +}; +use log::{debug, trace}; +use parking_lot::Mutex; +use std::{pin::Pin, sync::Arc, task::{Context, Poll as Poll03}}; -use futures::{prelude::*, future::Executor as _, sync::mpsc}; -use futures03::{compat::Compat, stream::StreamExt, future::FutureExt as _, future::TryFutureExt as _}; use finality_grandpa::Message::{Prevote, Precommit, PrimaryPropose}; use finality_grandpa::{voter, voter_set::VoterSet}; -use log::{debug, trace}; use sc_network::{NetworkService, ReputationChange}; use sc_network_gossip::{GossipEngine, Network as GossipNetwork}; use parity_scale_codec::{Encode, Decode}; @@ -47,7 +53,12 @@ use crate::{ }; use crate::environment::HasVoted; use gossip::{ - GossipMessage, FullCatchUpMessage, FullCommitMessage, VoteMessage, GossipValidator + FullCatchUpMessage, + FullCommitMessage, + GossipMessage, + GossipValidator, + PeerReport, + VoteMessage, }; use sp_finality_grandpa::{ AuthorityPair, AuthorityId, AuthoritySignature, SetId as SetIdNumber, RoundNumber, @@ -134,9 +145,31 @@ pub(crate) struct NetworkBridge> { service: N, gossip_engine: GossipEngine, validator: Arc>, + + /// Sender side of the neighbor packet channel. + /// + /// Packets sent into this channel are processed by the `NeighborPacketWorker` and passed on to + /// the underlying `GossipEngine`. neighbor_sender: periodic::NeighborPacketSender, + + /// `NeighborPacketWorker` processing packets sent through the `NeighborPacketSender`. + // + // `NetworkBridge` is required to be clonable, thus one needs to be able to clone its children, + // thus one has to wrap neighor_packet_worker with an `Arc` `Mutex`. + neighbor_packet_worker: Arc>>, + + /// Receiver side of the peer report stream populated by the gossip validator, forwarded to the + /// gossip engine. 
+ // + // `NetworkBridge` is required to be clonable, thus one needs to be able to clone its children, + // thus one has to wrap gossip_validator_report_stream with an `Arc` `Mutex`. Given that it is + // just an `UnboundedReceiver`, one could also switch to a multi-producer-*multi*-consumer + // channel implementation. + gossip_validator_report_stream: Arc>>, } +impl> Unpin for NetworkBridge {} + impl> NetworkBridge { /// Create a new NetworkBridge to the given NetworkService. Returns the service /// handle. @@ -147,7 +180,6 @@ impl> NetworkBridge { config: crate::Config, set_state: crate::environment::SharedVoterSetState, executor: &impl futures03::task::Spawn, - on_exit: impl futures03::Future + Clone + Send + Unpin + 'static, ) -> Self { let (validator, report_stream) = GossipValidator::new( config, @@ -195,16 +227,16 @@ impl> NetworkBridge { } } - let (rebroadcast_job, neighbor_sender) = periodic::neighbor_packet_worker(gossip_engine.clone()); - let reporting_job = report_stream.consume(gossip_engine.clone()); - - let bridge = NetworkBridge { service, gossip_engine, validator, neighbor_sender }; + let (neighbor_packet_worker, neighbor_packet_sender) = periodic::NeighborPacketWorker::new(); - let executor = Compat::new(executor); - executor.execute(Box::new(rebroadcast_job.select(on_exit.clone().map(Ok).compat()).then(|_| Ok(())))) - .expect("failed to spawn grandpa rebroadcast job task"); - executor.execute(Box::new(reporting_job.select(on_exit.clone().map(Ok).compat()).then(|_| Ok(())))) - .expect("failed to spawn grandpa reporting job task"); + let bridge = NetworkBridge { + service, + gossip_engine, + validator, + neighbor_sender: neighbor_packet_sender, + neighbor_packet_worker: Arc::new(Mutex::new(neighbor_packet_worker)), + gossip_validator_report_stream: Arc::new(Mutex::new(report_stream)), + }; bridge } @@ -391,6 +423,38 @@ impl> NetworkBridge { } } +impl> Future03 for NetworkBridge { + type Output = Result<(), Error>; + + fn poll(self: Pin<&mut Self>, 
cx: &mut Context) -> Poll03 { + loop { + match self.neighbor_packet_worker.lock().poll_next_unpin(cx) { + Poll03::Ready(Some((to, packet))) => { + self.gossip_engine.send_message(to, packet.encode()); + }, + Poll03::Ready(None) => return Poll03::Ready( + Err(Error::Network("Neighbor packet worker stream closed.".into())) + ), + Poll03::Pending => break, + } + } + + loop { + match self.gossip_validator_report_stream.lock().poll_next_unpin(cx) { + Poll03::Ready(Some(PeerReport { who, cost_benefit })) => { + self.gossip_engine.report(who, cost_benefit); + }, + Poll03::Ready(None) => return Poll03::Ready( + Err(Error::Network("Gossip validator report stream closed.".into())) + ), + Poll03::Pending => break, + } + } + + Poll03::Pending + } +} + fn incoming_global( mut gossip_engine: GossipEngine, topic: B::Hash, @@ -530,6 +594,8 @@ impl> Clone for NetworkBridge { gossip_engine: self.gossip_engine.clone(), validator: Arc::clone(&self.validator), neighbor_sender: self.neighbor_sender.clone(), + neighbor_packet_worker: self.neighbor_packet_worker.clone(), + gossip_validator_report_stream: self.gossip_validator_report_stream.clone(), } } } diff --git a/client/finality-grandpa/src/communication/periodic.rs b/client/finality-grandpa/src/communication/periodic.rs index a31203104b61f..d5c8c1e0b856c 100644 --- a/client/finality-grandpa/src/communication/periodic.rs +++ b/client/finality-grandpa/src/communication/periodic.rs @@ -16,21 +16,16 @@ //! Periodic rebroadcast of neighbor packets. 
-use std::time::{Instant, Duration}; - -use parity_scale_codec::Encode; -use futures::prelude::*; -use futures::sync::mpsc; use futures_timer::Delay; -use futures03::future::{FutureExt as _, TryFutureExt as _}; -use log::{debug, warn}; +use futures03::{channel::mpsc, future::{FutureExt as _}, prelude::*, ready, stream::Stream}; +use log::debug; +use std::{pin::Pin, task::{Context, Poll}, time::{Instant, Duration}}; use sc_network::PeerId; -use sc_network_gossip::GossipEngine; use sp_runtime::traits::{NumberFor, Block as BlockT}; use super::gossip::{NeighborPacket, GossipMessage}; -// how often to rebroadcast, if no other +// How often to rebroadcast, in cases where no new packets are created. const REBROADCAST_AFTER: Duration = Duration::from_secs(2 * 60); fn rebroadcast_instant() -> Instant { @@ -56,56 +51,65 @@ impl NeighborPacketSender { } } -/// Does the work of sending neighbor packets, asynchronously. -/// -/// It may rebroadcast the last neighbor packet periodically when no -/// progress is made. -pub(super) fn neighbor_packet_worker(net: GossipEngine) -> ( - impl Future + Send + 'static, - NeighborPacketSender, -) where - B: BlockT, -{ - let mut last = None; - let (tx, mut rx) = mpsc::unbounded::<(Vec, NeighborPacket>)>(); - let mut delay = Delay::new(REBROADCAST_AFTER); - - let work = futures::future::poll_fn(move || { - loop { - match rx.poll().expect("unbounded receivers do not error; qed") { - Async::Ready(None) => return Ok(Async::Ready(())), - Async::Ready(Some((to, packet))) => { - // send to peers. - net.send_message(to.clone(), GossipMessage::::from(packet.clone()).encode()); - - // rebroadcasting network. 
- delay.reset(rebroadcast_instant()); - last = Some((to, packet)); - } - Async::NotReady => break, - } - } +/// NeighborPacketWorker is listening on a channel for new neighbor packets being produced by +/// components within `finality-grandpa` and forwards those packets to the underlying +/// `NetworkEngine` through the `NetworkBridge` that it is being polled by (see `Stream` +/// implementation). Periodically it sends out the last packet in cases where no new ones arrive. +pub(super) struct NeighborPacketWorker { + last: Option<(Vec, NeighborPacket>)>, + delay: Delay, + rx: mpsc::UnboundedReceiver<(Vec, NeighborPacket>)>, +} + +impl Unpin for NeighborPacketWorker {} + +impl NeighborPacketWorker { + pub(super) fn new() -> (Self, NeighborPacketSender){ + let (tx, rx) = mpsc::unbounded::<(Vec, NeighborPacket>)>(); + let delay = Delay::new(REBROADCAST_AFTER); + + (NeighborPacketWorker { + last: None, + delay, + rx, + }, NeighborPacketSender(tx)) + } +} - // has to be done in a loop because it needs to be polled after - // re-scheduling. - loop { - match (&mut delay).unit_error().compat().poll() { - Err(e) => { - warn!(target: "afg", "Could not rebroadcast neighbor packets: {:?}", e); - delay.reset(rebroadcast_instant()); - } - Ok(Async::Ready(())) => { - delay.reset(rebroadcast_instant()); - - if let Some((ref to, ref packet)) = last { - // send to peers. 
- net.send_message(to.clone(), GossipMessage::::from(packet.clone()).encode()); - } - } - Ok(Async::NotReady) => return Ok(Async::NotReady), +impl Stream for NeighborPacketWorker { + type Item = (Vec, GossipMessage); + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> + { + let this = &mut *self; + match this.rx.poll_next_unpin(cx) { + Poll::Ready(None) => return Poll::Ready(None), + Poll::Ready(Some((to, packet))) => { + this.delay.reset(rebroadcast_instant()); + this.last = Some((to.clone(), packet.clone())); + + return Poll::Ready(Some((to, GossipMessage::::from(packet.clone())))); } + // Don't return yet, maybe the timer fired. + Poll::Pending => {}, + }; + + ready!(this.delay.poll_unpin(cx)); + + // Getting this far here implies that the timer fired. + + this.delay.reset(rebroadcast_instant()); + + // Make sure the underlying task is scheduled for wake-up. + // + // Note: In case poll_unpin is called after the resetted delay fires again, this + // will drop one tick. Deemed as very unlikely and also not critical. 
+ while let Poll::Ready(()) = this.delay.poll_unpin(cx) {}; + + if let Some((ref to, ref packet)) = this.last { + return Poll::Ready(Some((to.clone(), GossipMessage::::from(packet.clone())))); } - }); - (work, NeighborPacketSender(tx)) + return Poll::Pending; + } } diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index a016940a05652..c104af0339260 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -172,7 +172,6 @@ fn make_test_network(executor: &impl futures03::task::Spawn) -> ( config(), voter_set_state(), executor, - Exit, ); ( diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index d708e00bfd444..372229001dd19 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -412,7 +412,7 @@ for Environment where Block: 'static, B: Backend + 'static, - E: CallExecutor + Send + Sync + 'static, + E: CallExecutor + Send + Sync, N: NetworkT + 'static + Send, SC: SelectChain + 'static, VR: VotingRule>, diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index 809e0ab88a3a3..071214961f9a1 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -53,6 +53,7 @@ //! included in the newly-finalized chain. 
use futures::prelude::*; +use futures03::{StreamExt, future::ready}; use log::{debug, error, info}; use futures::sync::mpsc; use sc_client_api::{BlockchainEvents, CallExecutor, backend::{AuxStore, Backend}, ExecutionStrategy}; @@ -95,7 +96,7 @@ pub use justification::GrandpaJustification; pub use light_import::light_block_import; pub use observer::run_grandpa_observer; pub use voting_rule::{ - BeforeBestBlock, ThreeQuartersOfTheUnfinalizedChain, VotingRule, VotingRulesBuilder + BeforeBestBlockBy, ThreeQuartersOfTheUnfinalizedChain, VotingRule, VotingRulesBuilder }; use aux_schema::PersistentData; @@ -371,7 +372,7 @@ pub trait GenesisAuthoritySetProvider { impl GenesisAuthoritySetProvider for Client where B: Backend + Send + Sync + 'static, - E: CallExecutor + 'static + Clone + Send + Sync, + E: CallExecutor + Send + Sync, RA: Send + Sync, { fn get(&self) -> Result { @@ -407,7 +408,7 @@ pub fn block_import( ), ClientError> where B: Backend + 'static, - E: CallExecutor + 'static + Clone + Send + Sync, + E: CallExecutor + Send + Sync, RA: Send + Sync, SC: SelectChain, Client: AuxStore, @@ -535,7 +536,7 @@ pub struct GrandpaParams { /// Handle to a future that will resolve on exit. pub on_exit: X, /// If supplied, can be used to hook on telemetry connection established events. - pub telemetry_on_connect: Option>, + pub telemetry_on_connect: Option>, /// A voting rule used to potentially restrict target votes. pub voting_rule: VR, /// How to spawn background tasks. 
@@ -583,7 +584,6 @@ pub fn run_grandpa_voter( config.clone(), persistent_data.set_state.clone(), &executor, - on_exit.clone(), ); register_finality_tracker_inherent_data_provider(client.clone(), &inherent_data_providers)?; @@ -608,9 +608,10 @@ pub fn run_grandpa_voter( .expect("authorities is always at least an empty vector; elements are always of type string") } ); - Ok(()) + ready(()) }) - .then(|_| -> Result<(), ()> { Ok(()) }); + .unit_error() + .compat(); futures::future::Either::A(events) } else { futures::future::Either::B(futures::future::empty()) @@ -648,12 +649,13 @@ struct VoterWork, RA, SC, VR> { voter: Box>> + Send>, env: Arc>, voter_commands_rx: mpsc::UnboundedReceiver>>, + network: futures03::compat::Compat>, } impl VoterWork where Block: BlockT, - N: NetworkT + Sync, + N: NetworkT + Sync, NumberFor: BlockNumberOps, RA: 'static + Send + Sync, E: CallExecutor + Send + Sync + 'static, @@ -679,7 +681,7 @@ where voting_rule, voters: Arc::new(voters), config, - network, + network: network.clone(), set_id: persistent_data.authority_set.set_id(), authority_set: persistent_data.authority_set.clone(), consensus_changes: persistent_data.consensus_changes.clone(), @@ -692,6 +694,7 @@ where voter: Box::new(futures::empty()) as Box<_>, env, voter_commands_rx, + network: futures03::future::TryFutureExt::compat(network), }; work.rebuild_voter(); work @@ -829,7 +832,7 @@ where impl Future for VoterWork where Block: BlockT, - N: NetworkT + Sync, + N: NetworkT + Sync, NumberFor: BlockNumberOps, RA: 'static + Send + Sync, E: CallExecutor + Send + Sync + 'static, @@ -876,6 +879,15 @@ where } } + match self.network.poll() { + Ok(Async::NotReady) => {}, + Ok(Async::Ready(())) => { + // the network bridge future should never conclude. 
+ return Ok(Async::Ready(())) + } + e @ Err(_) => return e, + }; + Ok(Async::NotReady) } } diff --git a/client/finality-grandpa/src/observer.rs b/client/finality-grandpa/src/observer.rs index 3dbb2aff6a9cf..989a1e1655e8d 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -67,7 +67,7 @@ fn grandpa_observer( ) -> impl Future>> where NumberFor: BlockNumberOps, B: Backend, - E: CallExecutor + Send + Sync, + E: CallExecutor + Send + Sync + 'static, RA: Send + Sync, S: Stream< Item = CommunicationIn, @@ -178,7 +178,6 @@ pub fn run_grandpa_observer( config.clone(), persistent_data.set_state.clone(), &executor, - on_exit.clone(), ); let observer_work = ObserverWork::new( diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 4ad08c8868f27..9ad12c6c317ad 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -25,7 +25,7 @@ use sc_network_test::{ use sc_network::config::{ProtocolConfig, Roles, BoxFinalityProofRequestBuilder}; use parking_lot::Mutex; use futures_timer::Delay; -use futures03::{StreamExt as _, TryStreamExt as _}; +use futures03::TryStreamExt as _; use tokio::runtime::current_thread; use sp_keyring::Ed25519Keyring; use sc_client::LongestChain; @@ -287,12 +287,10 @@ impl ApiExt for RuntimeApi { unimplemented!("Not required for testing!") } - fn into_storage_changes< - T: sp_api::ChangesTrieStorage, sp_api::NumberFor> - >( + fn into_storage_changes( &self, _: &Self::StateBackend, - _: Option<&T>, + _: Option<&sp_api::ChangesTrieState, sp_api::NumberFor>>, _: ::Hash, ) -> std::result::Result, String> where Self: Sized @@ -1272,7 +1270,6 @@ fn voter_persists_its_votes() { config.clone(), set_state, &threads_pool, - Exit, ); let (round_rx, round_tx) = network.round_communication( @@ -1677,7 +1674,6 @@ fn grandpa_environment_respects_voting_rules() { config.clone(), set_state.clone(), &threads_pool, - Exit, ); Environment { @@ -1694,8 
+1690,8 @@ fn grandpa_environment_respects_voting_rules() { } }; - // add 20 blocks - peer.push_blocks(20, false); + // add 21 blocks + peer.push_blocks(21, false); // create an environment with no voting rule restrictions let unrestricted_env = environment(Box::new(())); @@ -1716,38 +1712,38 @@ fn grandpa_environment_respects_voting_rules() { unrestricted_env.best_chain_containing( peer.client().info().finalized_hash ).unwrap().1, - 20, + 21, ); - // both the other environments should return block 15, which is 3/4 of the + // both the other environments should return block 16, which is 3/4 of the // way in the unfinalized chain assert_eq!( three_quarters_env.best_chain_containing( peer.client().info().finalized_hash ).unwrap().1, - 15, + 16, ); assert_eq!( default_env.best_chain_containing( peer.client().info().finalized_hash ).unwrap().1, - 15, + 16, ); - // we finalize block 19 with block 20 being the best block + // we finalize block 19 with block 21 being the best block peer.client().finalize_block(BlockId::Number(19), None, false).unwrap(); - // the 3/4 environment should propose block 20 for voting + // the 3/4 environment should propose block 21 for voting assert_eq!( three_quarters_env.best_chain_containing( peer.client().info().finalized_hash ).unwrap().1, - 20, + 21, ); // while the default environment will always still make sure we don't vote - // on the best block + // on the best block (2 behind) assert_eq!( default_env.best_chain_containing( peer.client().info().finalized_hash @@ -1755,17 +1751,17 @@ fn grandpa_environment_respects_voting_rules() { 19, ); - // we finalize block 20 with block 20 being the best block - peer.client().finalize_block(BlockId::Number(20), None, false).unwrap(); + // we finalize block 21 with block 21 being the best block + peer.client().finalize_block(BlockId::Number(21), None, false).unwrap(); // even though the default environment will always try to not vote on the // best block, there's a hard rule that we can't cast 
any votes lower than - // the given base (#20). + // the given base (#21). assert_eq!( default_env.best_chain_containing( peer.client().info().finalized_hash ).unwrap().1, - 20, + 21, ); } diff --git a/client/finality-grandpa/src/voting_rule.rs b/client/finality-grandpa/src/voting_rule.rs index 8ba52f30f7b0f..523a1b05cd488 100644 --- a/client/finality-grandpa/src/voting_rule.rs +++ b/client/finality-grandpa/src/voting_rule.rs @@ -70,30 +70,38 @@ impl VotingRule for () where /// A custom voting rule that guarantees that our vote is always behind the best /// block, in the best case exactly one block behind it. #[derive(Clone)] -pub struct BeforeBestBlock; -impl VotingRule for BeforeBestBlock where +pub struct BeforeBestBlockBy(N); +impl VotingRule for BeforeBestBlockBy> where Block: BlockT, B: HeaderBackend, { fn restrict_vote( &self, - _backend: &B, + backend: &B, _base: &Block::Header, best_target: &Block::Header, current_target: &Block::Header, ) -> Option<(Block::Hash, NumberFor)> { + use sp_arithmetic::traits::Saturating; + if current_target.number().is_zero() { return None; } - if current_target.number() == best_target.number() { - return Some(( - current_target.parent_hash().clone(), - *current_target.number() - One::one(), - )); + // find the target number restricted by this rule + let target_number = best_target.number().saturating_sub(self.0); + + // our current target is already lower than this rule would restrict + if target_number >= *current_target.number() { + return None; } - None + // find the block at the given target height + find_target( + backend, + target_number, + current_target, + ) } } @@ -130,26 +138,43 @@ impl VotingRule for ThreeQuartersOfTheUnfinalizedChain where return None; } - let mut target_header = current_target.clone(); - let mut target_hash = current_target.hash(); - - // walk backwards until we find the target block - loop { - if *target_header.number() < target_number { - unreachable!( - "we are traversing backwards from a 
known block; \ - blocks are stored contiguously; \ - qed" - ); - } - if *target_header.number() == target_number { - return Some((target_hash, target_number)); - } - - target_hash = *target_header.parent_hash(); - target_header = backend.header(BlockId::Hash(target_hash)).ok()? - .expect("Header known to exist due to the existence of one of its descendents; qed"); + // find the block at the given target height + find_target( + backend, + target_number, + current_target, + ) + } +} + +// walk backwards until we find the target block +fn find_target( + backend: &B, + target_number: NumberFor, + current_header: &Block::Header, +) -> Option<(Block::Hash, NumberFor)> where + Block: BlockT, + B: HeaderBackend, +{ + let mut target_hash = current_header.hash(); + let mut target_header = current_header.clone(); + + loop { + if *target_header.number() < target_number { + unreachable!( + "we are traversing backwards from a known block; \ + blocks are stored contiguously; \ + qed" + ); + } + + if *target_header.number() == target_number { + return Some((target_hash, target_number)); } + + target_hash = *target_header.parent_hash(); + target_header = backend.header(BlockId::Hash(target_hash)).ok()? 
+ .expect("Header known to exist due to the existence of one of its descendents; qed"); } } @@ -213,7 +238,7 @@ impl Default for VotingRulesBuilder where { fn default() -> Self { VotingRulesBuilder::new() - .add(BeforeBestBlock) + .add(BeforeBestBlockBy(2.into())) .add(ThreeQuartersOfTheUnfinalizedChain) } } diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index cf8d31c6d36e5..91c80e2213e00 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Gossiping for the Substrate network protocol" name = "sc-network-gossip" -version = "2.0.0" +version = "0.8.0" license = "GPL-3.0" authors = ["Parity Technologies "] edition = "2018" diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index d24f986f4cd21..f1d89f71b0ed1 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -25,13 +25,13 @@ rand = "0.7.2" libp2p = { version = "0.14.0-alpha.1", default-features = false, features = ["libp2p-websocket"] } fork-tree = { version = "2.0.0", path = "../../utils/fork-tree" } sp-consensus = { version = "0.8", path = "../../primitives/consensus/common" } -sc-client = { version = "2.0.0", path = "../" } +sc-client = { version = "0.8", path = "../" } sc-client-api = { version = "2.0.0", path = "../api" } sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } sp-arithmetic = { version = "2.0.0", path = "../../primitives/arithmetic" } sp-core = { version = "2.0.0", path = "../../primitives/core" } -sc-block-builder = { version = "2.0.0", path = "../block-builder" } +sc-block-builder = { version = "0.8", path = "../block-builder" } codec = { package = "parity-scale-codec", version = "1.0.0", features = ["derive"] } sc-peerset = { version = "2.0.0", path = "../peerset" } serde = { version = "1.0.101", features = ["derive"] } diff --git a/client/network/src/protocol.rs 
b/client/network/src/protocol.rs index b712ebe515512..2aa29ea2793e3 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -1472,6 +1472,13 @@ impl, H: ExHashT> Protocol { who: PeerId, request: message::RemoteReadRequest, ) { + if request.keys.is_empty() { + debug!(target: "sync", "Invalid remote read request sent by {}", who); + self.behaviour.disconnect_peer(&who); + self.peerset_handle.report_peer(who, rep::BAD_MESSAGE); + return; + } + let keys_str = || match request.keys.len() { 1 => request.keys[0].to_hex::(), _ => format!( @@ -1510,6 +1517,13 @@ impl, H: ExHashT> Protocol { who: PeerId, request: message::RemoteReadChildRequest, ) { + if request.keys.is_empty() { + debug!(target: "sync", "Invalid remote child read request sent by {}", who); + self.behaviour.disconnect_peer(&who); + self.peerset_handle.report_peer(who, rep::BAD_MESSAGE); + return; + } + let keys_str = || match request.keys.len() { 1 => request.keys[0].to_hex::(), _ => format!( diff --git a/client/network/src/protocol/light_dispatch.rs b/client/network/src/protocol/light_dispatch.rs index dd94abc2a46fd..bfa8daa181ca1 100644 --- a/client/network/src/protocol/light_dispatch.rs +++ b/client/network/src/protocol/light_dispatch.rs @@ -691,7 +691,7 @@ pub mod tests { use crate::message::{self, BlockAttributes, Direction, FromBlock, RequestId}; use libp2p::PeerId; use super::{REQUEST_TIMEOUT, LightDispatch, LightDispatchNetwork, RequestData, StorageProof}; - use sp_test_primitives::{changes_trie_config, Block, Extrinsic, Header}; + use sp_test_primitives::{Block, Extrinsic, Header}; struct DummyFetchChecker { ok: bool } @@ -1095,7 +1095,11 @@ pub mod tests { let (tx, response) = oneshot::channel(); light_dispatch.add_request(&mut network_interface, RequestData::RemoteChanges(RemoteChangesRequest { - changes_trie_config: changes_trie_config(), + changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { + zero: (0, Default::default()), + end: None, + config: 
Some(sp_core::ChangesTrieConfiguration::new(4, 2)), + }], first_block: (1, Default::default()), last_block: (100, Default::default()), max_block: (100, Default::default()), diff --git a/client/network/src/protocol/sync.rs b/client/network/src/protocol/sync.rs index 8929fc75f8dd8..a513d47ca4198 100644 --- a/client/network/src/protocol/sync.rs +++ b/client/network/src/protocol/sync.rs @@ -308,8 +308,8 @@ impl ChainSync { best_queued_hash: info.best_hash, best_queued_number: info.best_number, best_imported_number: info.best_number, - extra_finality_proofs: ExtraRequests::new(), - extra_justifications: ExtraRequests::new(), + extra_finality_proofs: ExtraRequests::new("finality proof"), + extra_justifications: ExtraRequests::new("justification"), role, required_block_attributes, queue_blocks: Default::default(), diff --git a/client/network/src/protocol/sync/extra_requests.rs b/client/network/src/protocol/sync/extra_requests.rs index a0ea68af5f334..44b42b154fff2 100644 --- a/client/network/src/protocol/sync/extra_requests.rs +++ b/client/network/src/protocol/sync/extra_requests.rs @@ -18,7 +18,7 @@ use sp_blockchain::Error as ClientError; use crate::protocol::sync::{PeerSync, PeerSyncState}; use fork_tree::ForkTree; use libp2p::PeerId; -use log::{debug, warn}; +use log::{debug, trace, warn}; use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; use std::collections::{HashMap, HashSet, VecDeque}; use std::time::{Duration, Instant}; @@ -48,10 +48,12 @@ pub(crate) struct ExtraRequests { failed_requests: HashMap, Vec<(PeerId, Instant)>>, /// successful requests importing_requests: HashSet>, + /// the name of this type of extra request (useful for logging.) 
+ request_type_name: &'static str, } impl ExtraRequests { - pub(crate) fn new() -> Self { + pub(crate) fn new(request_type_name: &'static str) -> Self { ExtraRequests { tree: ForkTree::new(), best_seen_finalized_number: Zero::zero(), @@ -59,6 +61,7 @@ impl ExtraRequests { active_requests: HashMap::new(), failed_requests: HashMap::new(), importing_requests: HashSet::new(), + request_type_name, } } @@ -113,11 +116,28 @@ impl ExtraRequests { // messages to chain sync. if let Some(request) = self.active_requests.remove(&who) { if let Some(r) = resp { + trace!(target: "sync", "Queuing import of {} from {:?} for {:?}", + self.request_type_name, + who, + request, + ); + self.importing_requests.insert(request); return Some((who, request.0, request.1, r)) + } else { + trace!(target: "sync", "Empty {} response from {:?} for {:?}", + self.request_type_name, + who, + request, + ); } self.failed_requests.entry(request).or_insert(Vec::new()).push((who, Instant::now())); self.pending_requests.push_front(request); + } else { + trace!(target: "sync", "No active {} request to {:?}", + self.request_type_name, + who, + ); } None } @@ -265,6 +285,13 @@ impl<'a, B: BlockT> Matcher<'a, B> { continue } self.extras.active_requests.insert(peer.clone(), request); + + trace!(target: "sync", "Sending {} request to {:?} for {:?}", + self.extras.request_type_name, + peer, + request, + ); + return Some((peer.clone(), request)) } @@ -293,7 +320,7 @@ mod tests { #[test] fn requests_are_processed_in_order() { fn property(mut peers: ArbitraryPeers) { - let mut requests = ExtraRequests::::new(); + let mut requests = ExtraRequests::::new("test"); let num_peers_available = peers.0.values() .filter(|s| s.state == PeerSyncState::Available).count(); @@ -319,7 +346,7 @@ mod tests { #[test] fn new_roots_schedule_new_request() { fn property(data: Vec) { - let mut requests = ExtraRequests::::new(); + let mut requests = ExtraRequests::::new("test"); for (i, number) in data.into_iter().enumerate() { let hash = 
[i as u8; 32].into(); let pending = requests.pending_requests.len(); @@ -336,7 +363,7 @@ mod tests { #[test] fn disconnecting_implies_rescheduling() { fn property(mut peers: ArbitraryPeers) -> bool { - let mut requests = ExtraRequests::::new(); + let mut requests = ExtraRequests::::new("test"); let num_peers_available = peers.0.values() .filter(|s| s.state == PeerSyncState::Available).count(); @@ -371,7 +398,7 @@ mod tests { #[test] fn no_response_reschedules() { fn property(mut peers: ArbitraryPeers) { - let mut requests = ExtraRequests::::new(); + let mut requests = ExtraRequests::::new("test"); let num_peers_available = peers.0.values() .filter(|s| s.state == PeerSyncState::Available).count(); @@ -404,7 +431,7 @@ mod tests { fn request_is_rescheduled_when_earlier_block_is_finalized() { let _ = ::env_logger::try_init(); - let mut finality_proofs = ExtraRequests::::new(); + let mut finality_proofs = ExtraRequests::::new("test"); let hash4 = [4; 32].into(); let hash5 = [5; 32].into(); @@ -442,7 +469,7 @@ mod tests { #[test] fn anecstor_roots_are_finalized_when_finality_notification_is_missed() { - let mut finality_proofs = ExtraRequests::::new(); + let mut finality_proofs = ExtraRequests::::new("test"); let hash4 = [4; 32].into(); let hash5 = [5; 32].into(); diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 4e923a51a123e..9c9a87bb67e01 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -33,10 +33,10 @@ use std::task::Poll; use sp_consensus::import_queue::{ImportQueue, Link}; use sp_consensus::import_queue::{BlockImportResult, BlockImportError}; use futures::{prelude::*, channel::mpsc}; -use log::{warn, error, info}; +use log::{warn, error, info, trace}; use libp2p::{PeerId, Multiaddr, kad::record}; use libp2p::core::{transport::boxed::Boxed, muxing::StreamMuxerBox}; -use libp2p::swarm::NetworkBehaviour; +use libp2p::swarm::{NetworkBehaviour, SwarmEvent}; use parking_lot::Mutex; use 
sc_peerset::PeersetHandle; use sp_runtime::{traits::{Block as BlockT, NumberFor}, ConsensusEngineId}; @@ -774,23 +774,32 @@ impl, H: ExHashT> Future for Ne loop { // Process the next action coming from the network. - let poll_value = this.network_service.poll_next_unpin(cx); + let next_event = this.network_service.next_event(); + futures::pin_mut!(next_event); + let poll_value = next_event.poll_unpin(cx); match poll_value { Poll::Pending => break, - Poll::Ready(Some(BehaviourOut::BlockImport(origin, blocks))) => + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::BlockImport(origin, blocks))) => this.import_queue.import_blocks(origin, blocks), - Poll::Ready(Some(BehaviourOut::JustificationImport(origin, hash, nb, justification))) => + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::JustificationImport(origin, hash, nb, justification))) => this.import_queue.import_justification(origin, hash, nb, justification), - Poll::Ready(Some(BehaviourOut::FinalityProofImport(origin, hash, nb, proof))) => + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::FinalityProofImport(origin, hash, nb, proof))) => this.import_queue.import_finality_proof(origin, hash, nb, proof), - Poll::Ready(Some(BehaviourOut::Event(ev))) => { - this.event_streams.retain(|sender| sender.unbounded_send(ev.clone()).is_ok()); - }, - Poll::Ready(None) => { - error!(target: "sync", "Network events stream has returned None"); - break; - }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::Event(ev))) => + this.event_streams.retain(|sender| sender.unbounded_send(ev.clone()).is_ok()), + Poll::Ready(SwarmEvent::Connected(peer_id)) => + trace!(target: "sub-libp2p", "Libp2p => Connected({:?})", peer_id), + Poll::Ready(SwarmEvent::Disconnected(peer_id)) => + trace!(target: "sub-libp2p", "Libp2p => Disconnected({:?})", peer_id), + Poll::Ready(SwarmEvent::NewListenAddr(addr)) => + trace!(target: "sub-libp2p", "Libp2p => NewListenAddr({})", addr), + Poll::Ready(SwarmEvent::ExpiredListenAddr(addr)) => + trace!(target: 
"sub-libp2p", "Libp2p => ExpiredListenAddr({})", addr), + Poll::Ready(SwarmEvent::UnreachableAddr { peer_id, address, error }) => + trace!(target: "sub-libp2p", "Libp2p => Failed to reach {:?} through {:?}: {}", peer_id, address, error), + Poll::Ready(SwarmEvent::StartConnect(peer_id)) => + trace!(target: "sub-libp2p", "Libp2p => StartConnect({:?})", peer_id), }; } diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index 381276ecfa5bd..036607df921ba 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Integration tests for Substrate network protocol" name = "sc-network-test" -version = "2.0.0" +version = "0.8.0" license = "GPL-3.0" authors = ["Parity Technologies "] edition = "2018" @@ -16,12 +16,12 @@ futures-timer = "0.4.0" rand = "0.7.2" libp2p = { version = "0.14.0-alpha.1", default-features = false, features = ["libp2p-websocket"] } sp-consensus = { version = "0.8", path = "../../../primitives/consensus/common" } -sc-client = { version = "2.0.0", path = "../../" } +sc-client = { version = "0.8", path = "../../" } sc-client-api = { version = "2.0.0", path = "../../api" } sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sc-block-builder = { version = "2.0.0", path = "../../block-builder" } +sc-block-builder = { version = "0.8", path = "../../block-builder" } sp-consensus-babe = { version = "0.8", path = "../../../primitives/consensus/babe" } env_logger = "0.7.0" substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index d738eb3d6c3ed..2140de0973449 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -80,7 +80,7 @@ fn 
sync_cycle_from_offline_to_syncing_to_offline() { } if peer < 2 { // Major syncing. - if !net.peer(peer).is_major_syncing() { + if net.peer(peer).blocks_count() < 100 && !net.peer(peer).is_major_syncing() { return Ok(Async::NotReady) } } diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index 8048d10dc34fc..3ef98f1ab9004 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -31,7 +31,7 @@ hyper = "0.12.35" hyper-rustls = "0.17.1" [dev-dependencies] -sc-client-db = { version = "2.0.0", default-features = true, path = "../db/" } +sc-client-db = { version = "0.8", default-features = true, path = "../db/" } env_logger = "0.7.0" substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } tokio = "0.1.22" diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index e25072d42d603..78760fb011c1c 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -101,13 +101,14 @@ impl OffchainWorkers< let has_api_v1 = runtime.has_api_with::, _>( &at, |v| v == 1 ); - let has_api_v2 = runtime.has_api::>(&at); + let has_api_v2 = runtime.has_api_with::, _>( + &at, |v| v == 2 + ); let version = match (has_api_v1, has_api_v2) { (_, Ok(true)) => 2, (Ok(true), _) => 1, _ => 0, }; - debug!("Checking offchain workers at {:?}: version:{}", at, version); if version > 0 { let (api, runner) = api::AsyncApi::new( @@ -139,6 +140,8 @@ impl OffchainWorkers< }); futures::future::Either::Left(runner.process()) } else { + let help = "Consider turning off offchain workers if they are not part of your runtime."; + log::error!("Unsupported Offchain Worker API version: {}. 
{}", version, help); futures::future::Either::Right(futures::future::ready(())) } } diff --git a/client/rpc-api/Cargo.toml b/client/rpc-api/Cargo.toml index 2c7a2ee071d7f..7f1a12f80e80d 100644 --- a/client/rpc-api/Cargo.toml +++ b/client/rpc-api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-rpc-api" -version = "2.0.0" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index f4a42d169a6dc..6530bff4ed652 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -5,9 +5,9 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] -sc-rpc-api = { version = "2.0.0", path = "../rpc-api" } +sc-rpc-api = { version = "0.8", path = "../rpc-api" } sc-client-api = { version = "2.0.0", path = "../api" } -sc-client = { version = "2.0.0", path = "../" } +sc-client = { version = "0.8", path = "../" } sp-api = { version = "2.0.0", path = "../../primitives/api" } codec = { package = "parity-scale-codec", version = "1.0.0" } futures = { version = "0.3.1", features = ["compat"] } @@ -20,8 +20,8 @@ serde_json = "1.0.41" sp-session = { version = "2.0.0", path = "../../primitives/session" } sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } sp-rpc = { version = "2.0.0", path = "../../primitives/rpc" } -sp-state-machine = { version = "2.0.0", path = "../../primitives/state-machine" } -sc-executor = { version = "2.0.0", path = "../executor" } +sp-state-machine = { version = "0.8", path = "../../primitives/state-machine" } +sc-executor = { version = "0.8", path = "../executor" } sc-keystore = { version = "2.0.0", path = "../keystore" } sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } diff --git a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs index abf4631bb82eb..482eb0723ff80 100644 --- a/client/rpc/src/state/state_light.rs +++ 
b/client/rpc/src/state/state_light.rs @@ -325,8 +325,8 @@ impl StateBackend for LightState> ) { let keys = match keys { - Some(keys) => keys, - None => { + Some(keys) if !keys.is_empty() => keys, + _ => { warn!("Cannot subscribe to all keys on light client. Subscription rejected."); return; } diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index 5951ee86a93ce..a0ab11e977204 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -21,7 +21,7 @@ use self::error::Error; use std::sync::Arc; use assert_matches::assert_matches; use futures01::stream::Stream; -use sp_core::storage::{well_known_keys, ChildInfo}; +use sp_core::{storage::{well_known_keys, ChildInfo}, ChangesTrieConfiguration}; use sp_core::hash::H256; use sp_io::hashing::blake2_256; use substrate_test_runtime_client::{ @@ -378,7 +378,9 @@ fn should_query_storage() { } run_tests(Arc::new(substrate_test_runtime_client::new())); - run_tests(Arc::new(TestClientBuilder::new().set_support_changes_trie(true).build())); + run_tests(Arc::new(TestClientBuilder::new() + .changes_trie_config(Some(ChangesTrieConfiguration::new(4, 2))) + .build())); } #[test] @@ -399,7 +401,7 @@ fn should_return_runtime_version() { let api = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor()))); let result = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":1,\ - \"specVersion\":1,\"implVersion\":1,\"apis\":[[\"0xdf6acb689907609b\",2],\ + \"specVersion\":1,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",2],\ [\"0x37e397fc7c91f5e4\",1],[\"0xd2bc9897eed08f15\",1],[\"0x40fe3ad401f8959a\",4],\ [\"0xc6e9a76309f39b09\",1],[\"0xdd718d5cc53262d4\",1],[\"0xcbca25e39f142387\",1],\ [\"0xf78b278be53f454c\",2],[\"0xab3c0572291feb8b\",1],[\"0xbc9d89904f5b923f\",1]]}"; diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 42f0a73dfbfa2..9946377693110 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -1,6 +1,6 @@ 
[package] name = "sc-service" -version = "2.0.0" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" @@ -15,14 +15,14 @@ wasmtime = [ [dependencies] derive_more = "0.99.2" -futures = "0.1.29" -futures03 = { package = "futures", version = "0.3.1", features = ["compat"] } +futures01 = { package = "futures", version = "0.1.29" } +futures = "0.3.1" parking_lot = "0.9.0" lazy_static = "1.4.0" log = "0.4.8" slog = { version = "2.5.2", features = ["nested-values"] } tokio-executor = "0.1.8" -tokio-timer = "0.2.11" +futures-timer = "2" exit-future = "0.2.0" serde = "1.0.101" serde_json = "1.0.41" @@ -39,11 +39,11 @@ sp-consensus = { version = "0.8", path = "../../primitives/consensus/common" } sc-network = { version = "0.8", path = "../network" } sc-chain-spec = { version = "2.0.0", path = "../chain-spec" } sc-client-api = { version = "2.0.0", path = "../api" } -sc-client = { version = "2.0.0", path = "../" } +sc-client = { version = "0.8", path = "../" } sp-api = { version = "2.0.0", path = "../../primitives/api" } -sc-client-db = { version = "2.0.0", path = "../db" } +sc-client-db = { version = "0.8", path = "../db" } codec = { package = "parity-scale-codec", version = "1.0.0" } -sc-executor = { version = "2.0.0", path = "../executor" } +sc-executor = { version = "0.8", path = "../executor" } sc-transaction-pool = { version = "2.0.0", path = "../transaction-pool" } sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } sc-rpc-server = { version = "2.0.0", path = "../rpc-servers" } @@ -51,13 +51,13 @@ sc-rpc = { version = "2.0.0", path = "../rpc" } sc-telemetry = { version = "2.0.0", path = "../telemetry" } sc-offchain = { version = "2.0.0", path = "../offchain" } parity-multiaddr = { package = "parity-multiaddr", version = "0.5.0" } -prometheus-endpoint = { path = "../../utils/prometheus" } +prometheus-exporter = { path = "../../utils/prometheus" } sc-tracing = { version = "2.0.0", path = "../tracing" } tracing = 
"0.1.10" [dev-dependencies] substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } sp-consensus-babe = { version = "0.8", path = "../../primitives/consensus/babe" } -grandpa = { version = "2.0.0", package = "sc-finality-grandpa", path = "../finality-grandpa" } +grandpa = { version = "0.8", package = "sc-finality-grandpa", path = "../finality-grandpa" } grandpa-primitives = { version = "2.0.0", package = "sp-finality-grandpa", path = "../../primitives/finality-grandpa" } -tokio = "0.1" +tokio = { version = "0.2", features = ["rt-core"] } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 2e4b1ee6af48c..85d4091eed02b 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -27,12 +27,10 @@ use sc_client_api::{ use sc_client::Client; use sc_chain_spec::{RuntimeGenesis, Extension}; use sp_consensus::import_queue::ImportQueue; -use futures::{prelude::*, sync::mpsc}; -use futures03::{ - compat::Compat, - FutureExt as _, TryFutureExt as _, - StreamExt as _, TryStreamExt as _, - future::{select, Either} +use futures::{ + Future, FutureExt, StreamExt, + channel::mpsc, + future::{select, ready} }; use sc_keystore::{Store as Keystore}; use log::{info, warn, error}; @@ -41,52 +39,56 @@ use sc_network::{config::BoxFinalityProofRequestBuilder, specialization::Network use parking_lot::{Mutex, RwLock}; use sp_runtime::generic::BlockId; use sp_runtime::traits::{ - Block as BlockT, NumberFor, SaturatedConversion, HasherFor, + Block as BlockT, NumberFor, SaturatedConversion, HasherFor, UniqueSaturatedInto, }; use sp_api::ProvideRuntimeApi; use sc_executor::{NativeExecutor, NativeExecutionDispatch}; use std::{ io::{Read, Write, Seek}, - marker::PhantomData, sync::Arc, time::SystemTime + marker::PhantomData, sync::Arc, time::SystemTime, pin::Pin }; use sysinfo::{get_current_pid, ProcessExt, System, SystemExt}; use sc_telemetry::{telemetry, SUBSTRATE_INFO}; use 
sp_transaction_pool::{TransactionPool, TransactionPoolMaintainer}; use sp_blockchain; -use prometheus_endpoint::{create_gauge, Gauge, U64, F64}; +use prometheus_exporter::{create_gauge, Gauge, U64, F64, Registry}; -prometheus_endpoint::lazy_static! { +prometheus_exporter::lazy_static! { pub static ref FINALITY_HEIGHT: Gauge = create_gauge( - "consensus_finality_block_height_number", - "block is finality HEIGHT" + "finality_block_height_number", + "Height of the highest finalized block" ); pub static ref BEST_HEIGHT: Gauge = create_gauge( - "consensus_best_block_height_number", - "block is best HEIGHT" + "best_block_height_number", + "Height of the highest block" ); - pub static ref P2P_PEERS_NUM: Gauge = create_gauge( - "p2p_peers_number", - "network gosip peers number" + pub static ref PEERS_NUM: Gauge = create_gauge( + "peers_count", + "Number of network gossip peers" ); pub static ref TX_COUNT: Gauge = create_gauge( - "consensus_num_txs", + "transaction_count", "Number of transactions" ); pub static ref NODE_MEMORY: Gauge = create_gauge( - "consensus_node_memory", - "node memory" + "memory_usage", + "Node memory usage" ); pub static ref NODE_CPU: Gauge = create_gauge( - "consensus_node_cpu", - "node cpu" + "cpu_usage", + "Node CPU usage" ); - pub static ref P2P_NODE_DOWNLOAD: Gauge = create_gauge( - "p2p_peers_receive_byte_per_sec", - "p2p_node_download_per_sec_byte" + pub static ref NODE_DOWNLOAD: Gauge = create_gauge( + "receive_byte_per_sec", + "Received bytes per second" ); - pub static ref P2P_NODE_UPLOAD: Gauge = create_gauge( - "p2p_peers_send_byte_per_sec", - "p2p_node_upload_per_sec_byte" + pub static ref NODE_UPLOAD: Gauge = create_gauge( + "sent_byte_per_sec", + "Sent bytes per second" + ); + pub static ref SYNC_TARGET: Gauge = create_gauge( + "sync_target_number", + "Block sync target number" ); } /// Aggregator for the components required to build a service. 
@@ -124,6 +126,7 @@ pub struct ServiceBuilder>>, marker: PhantomData<(TBl, TRtApi)>, + prometheus_registry: Option } /// Full client type. @@ -183,7 +186,7 @@ pub fn new_full_client( config: &Configuration, ) -> Result, Error> where TBl: BlockT, - TExecDisp: NativeExecutionDispatch, + TExecDisp: NativeExecutionDispatch + 'static, TGen: sp_runtime::BuildStorage + serde::Serialize + for<'de> serde::Deserialize<'de>, TCSExt: Extension, { @@ -194,7 +197,7 @@ fn new_full_parts( config: &Configuration, ) -> Result, Error> where TBl: BlockT, - TExecDisp: NativeExecutionDispatch, + TExecDisp: NativeExecutionDispatch + 'static, TGen: sp_runtime::BuildStorage + serde::Serialize + for<'de> serde::Deserialize<'de>, TCSExt: Extension, { @@ -262,7 +265,7 @@ fn new_full_parts( impl ServiceBuilder<(), (), TCfg, TGen, TCSExt, (), (), (), (), (), (), (), (), (), ()> where TGen: RuntimeGenesis, TCSExt: Extension { /// Start the service builder with a configuration. - pub fn new_full( + pub fn new_full( config: Configuration ) -> Result Self { + Self { + config: self.config, + client: self.client, + backend: self.backend, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain: self.select_chain, + import_queue: self.import_queue, + finality_proof_request_builder: self.finality_proof_request_builder, + finality_proof_provider: self.finality_proof_provider, + network_protocol: self.network_protocol, + transaction_pool: self.transaction_pool, + rpc_extensions: self.rpc_extensions, + remote_backend: self.remote_backend, + marker: self.marker, + prometheus_registry: Some(registry), + } + } } /// Implemented on `ServiceBuilder`. Allows running block commands, such as import/export/validate @@ -716,7 +749,7 @@ pub trait ServiceBuilderCommand { self, input: impl Read + Seek + Send + 'static, force: bool, - ) -> Box + Send>; + ) -> Pin> + Send>>; /// Performs the blocks export. 
fn export_blocks( @@ -725,7 +758,7 @@ pub trait ServiceBuilderCommand { from: NumberFor, to: Option>, json: bool - ) -> Box>; + ) -> Pin>>>; /// Performs a revert of `blocks` blocks. fn revert_chain( @@ -737,7 +770,7 @@ pub trait ServiceBuilderCommand { fn check_block( self, block: BlockId - ) -> Box + Send>; + ) -> Pin> + Send>>; } impl @@ -817,6 +850,7 @@ ServiceBuilder< transaction_pool, rpc_extensions, remote_backend, + prometheus_registry, } = self; sp_session::generate_initial_session_keys( @@ -829,7 +863,7 @@ ServiceBuilder< // List of asynchronous tasks to spawn. We collect them, then spawn them all at once. let (to_spawn_tx, to_spawn_rx) = - mpsc::unbounded:: + Send>>(); + mpsc::unbounded:: + Send>>>(); // A side-channel for essential tasks to communicate shutdown. let (essential_failed_tx, essential_failed_rx) = mpsc::unbounded(); @@ -913,7 +947,6 @@ ServiceBuilder< let is_validator = config.roles.is_authority(); let events = client.import_notification_stream() - .map(|v| Ok::<_, ()>(v)).compat() .for_each(move |notification| { let txpool = txpool.upgrade(); @@ -921,8 +954,8 @@ ServiceBuilder< let future = txpool.maintain( &BlockId::hash(notification.hash), ¬ification.retracted, - ).map(|_| Ok(())).compat(); - let _ = to_spawn_tx_.unbounded_send(Box::new(future)); + ); + let _ = to_spawn_tx_.unbounded_send(Box::pin(future)); } let offchain = offchain.as_ref().and_then(|o| o.upgrade()); @@ -931,15 +964,13 @@ ServiceBuilder< ¬ification.header, network_state_info.clone(), is_validator - ).map(|()| Ok(())); - let _ = to_spawn_tx_.unbounded_send(Box::new(Compat::new(future))); + ); + let _ = to_spawn_tx_.unbounded_send(Box::pin(future)); } - Ok(()) - }) - .select(exit.clone().map(Ok).compat()) - .then(|_| Ok(())); - let _ = to_spawn_tx.unbounded_send(Box::new(events)); + ready(()) + }); + let _ = to_spawn_tx.unbounded_send(Box::pin(select(events, exit.clone()).map(drop))); } { @@ -947,7 +978,6 @@ ServiceBuilder< let network = Arc::downgrade(&network); let 
transaction_pool_ = transaction_pool.clone(); let events = transaction_pool.import_notification_stream() - .map(|v| Ok::<_, ()>(v)).compat() .for_each(move |_| { if let Some(network) = network.upgrade() { network.trigger_repropagate(); @@ -957,12 +987,10 @@ ServiceBuilder< "ready" => status.ready, "future" => status.future ); - Ok(()) - }) - .select(exit.clone().map(Ok).compat()) - .then(|_| Ok(())); + ready(()) + }); - let _ = to_spawn_tx.unbounded_send(Box::new(events)); + let _ = to_spawn_tx.unbounded_send(Box::pin(select(events, exit.clone()).map(drop))); } // Periodically notify the telemetry. @@ -981,6 +1009,8 @@ ServiceBuilder< let finalized_number: u64 = info.chain.finalized_number.saturated_into::(); let bandwidth_download = net_status.average_download_per_sec; let bandwidth_upload = net_status.average_upload_per_sec; + let best_seen_block = net_status.best_seen_block + .map(|num: NumberFor| num.unique_saturated_into() as u64); // get cpu usage and memory usage of this process let (cpu_usage, memory) = if let Some(self_pid) = self_pid { @@ -1014,12 +1044,16 @@ ServiceBuilder< TX_COUNT.set(txpool_status.ready as u64); FINALITY_HEIGHT.set(finalized_number); BEST_HEIGHT.set(best_number); - P2P_PEERS_NUM.set(num_peers as u64); - P2P_NODE_DOWNLOAD.set(net_status.average_download_per_sec); - P2P_NODE_UPLOAD.set(net_status.average_upload_per_sec); - Ok(()) - }).select(exit.clone().map(Ok).compat()).then(|_| Ok(())); - let _ = to_spawn_tx.unbounded_send(Box::new(tel_task)); + PEERS_NUM.set(num_peers as u64); + NODE_DOWNLOAD.set(net_status.average_download_per_sec); + NODE_UPLOAD.set(net_status.average_upload_per_sec); + if let Some(best_seen_block) = best_seen_block { + SYNC_TARGET.set(best_seen_block); + } + + ready(()) + }); + let _ = to_spawn_tx.unbounded_send(Box::pin(select(tel_task, exit.clone()).map(drop))); // Periodically send the network state to the telemetry. 
let (netstat_tx, netstat_rx) = mpsc::unbounded::<(NetworkStatus<_>, NetworkState)>(); @@ -1030,12 +1064,12 @@ ServiceBuilder< "system.network_state"; "state" => network_state, ); - Ok(()) - }).select(exit.clone().map(Ok).compat()).then(|_| Ok(())); - let _ = to_spawn_tx.unbounded_send(Box::new(tel_task_2)); + ready(()) + }); + let _ = to_spawn_tx.unbounded_send(Box::pin(select(tel_task_2, exit.clone()).map(drop))); // RPC - let (system_rpc_tx, system_rpc_rx) = futures03::channel::mpsc::unbounded(); + let (system_rpc_tx, system_rpc_rx) = mpsc::unbounded(); let gen_handler = || { use sc_rpc::{chain, state, author, system}; @@ -1095,17 +1129,14 @@ ServiceBuilder< let rpc = start_rpc_servers(&config, gen_handler)?; - let _ = to_spawn_tx.unbounded_send(Box::new(build_network_future( + let _ = to_spawn_tx.unbounded_send(Box::pin(select(build_network_future( config.roles, network_mut, client.clone(), network_status_sinks.clone(), system_rpc_rx, has_bootnodes, - ) - .map_err(|_| ()) - .select(exit.clone().map(Ok).compat()) - .then(|_| Ok(())))); + ), exit.clone()).map(drop))); let telemetry_connection_sinks: Arc>>> = Default::default(); @@ -1126,8 +1157,6 @@ ServiceBuilder< .map(|dur| dur.as_millis()) .unwrap_or(0); let future = telemetry.clone() - .map(|ev| Ok::<_, ()>(ev)) - .compat() .for_each(move |event| { // Safe-guard in case we add more events in the future. 
let sc_telemetry::TelemetryEvent::Connected = event; @@ -1146,25 +1175,36 @@ ServiceBuilder< telemetry_connection_sinks_.lock().retain(|sink| { sink.unbounded_send(()).is_ok() }); - Ok(()) + ready(()) }); - let _ = to_spawn_tx.unbounded_send(Box::new(future - .select(exit.clone().map(Ok).compat()) - .then(|_| Ok(())))); + let _ = to_spawn_tx.unbounded_send(Box::pin(select( + future, exit.clone() + ).map(drop))); telemetry }); - // Prometheus endpoint + // Prometheus exporter if let Some(port) = config.prometheus_port { + let registry = match prometheus_registry { + Some(registry) => registry, + None => Registry::new_custom(Some("substrate".into()), None)? + }; + + registry.register(Box::new(NODE_MEMORY.clone()))?; + registry.register(Box::new(NODE_CPU.clone()))?; + registry.register(Box::new(TX_COUNT.clone()))?; + registry.register(Box::new(FINALITY_HEIGHT.clone()))?; + registry.register(Box::new(BEST_HEIGHT.clone()))?; + registry.register(Box::new(PEERS_NUM.clone()))?; + registry.register(Box::new(NODE_DOWNLOAD.clone()))?; + registry.register(Box::new(NODE_UPLOAD.clone()))?; + let future = select( - prometheus_endpoint::init_prometheus(port).boxed(), + prometheus_exporter::init_prometheus(port, registry).boxed(), exit.clone() - ).map(|either| match either { - Either::Left((result, _)) => result.map_err(|_| ()), - Either::Right(_) => Ok(()) - }).compat(); + ).map(drop); - let _ = to_spawn_tx.unbounded_send(Box::new(future)); - } + let _ = to_spawn_tx.unbounded_send(Box::pin(future)); + } // Instrumentation if let Some(tracing_targets) = config.tracing_targets.as_ref() { diff --git a/client/service/src/chain_ops.rs b/client/service/src/chain_ops.rs index 0b86fb366f0b6..0c2fe79718a72 100644 --- a/client/service/src/chain_ops.rs +++ b/client/service/src/chain_ops.rs @@ -22,9 +22,6 @@ use crate::error::Error; use sc_chain_spec::{ChainSpec, RuntimeGenesis, Extension}; use log::{warn, info}; use futures::{future, prelude::*}; -use futures03::{ - TryFutureExt as _, -}; 
use sp_runtime::traits::{ Block as BlockT, NumberFor, One, Zero, Header, SaturatedConversion }; @@ -34,9 +31,7 @@ use sc_client::Client; use sp_consensus::import_queue::{IncomingBlock, Link, BlockImportError, BlockImportResult, ImportQueue}; use sp_consensus::BlockOrigin; -use std::{ - io::{Read, Write, Seek}, -}; +use std::{io::{Read, Write, Seek}, pin::Pin}; use sc_network::message; @@ -68,7 +63,7 @@ impl< self, input: impl Read + Seek + Send + 'static, force: bool, - ) -> Box + Send> { + ) -> Pin> + Send>> { struct WaitLink { imported_blocks: u64, has_error: bool, @@ -117,7 +112,7 @@ impl< // queue, the `Future` re-schedules itself and returns `Poll::Pending`. // This makes it possible either to interleave other operations in-between the block imports, // or to stop the operation completely. - let import = futures03::future::poll_fn(move |cx| { + let import = future::poll_fn(move |cx| { // Start by reading the number of blocks if not done so already. let count = match count { Some(c) => c, @@ -205,7 +200,7 @@ impl< return std::task::Poll::Pending; } }); - Box::new(import.compat()) + Box::pin(import) } fn export_blocks( @@ -214,7 +209,7 @@ impl< from: NumberFor, to: Option>, json: bool - ) -> Box> { + ) -> Pin>>> { let client = self.client; let mut block = from; @@ -233,7 +228,7 @@ impl< // `Poll::Pending`. // This makes it possible either to interleave other operations in-between the block exports, // or to stop the operation completely. 
- let export = futures03::future::poll_fn(move |cx| { + let export = future::poll_fn(move |cx| { if last < block { return std::task::Poll::Ready(Err("Invalid block range specified".into())); } @@ -274,7 +269,7 @@ impl< std::task::Poll::Pending }); - Box::new(export.compat()) + Box::pin(export) } fn revert_chain( @@ -295,7 +290,7 @@ impl< fn check_block( self, block_id: BlockId - ) -> Box + Send> { + ) -> Pin> + Send>> { match self.client.block(&block_id) { Ok(Some(block)) => { let mut buf = Vec::new(); @@ -304,8 +299,8 @@ impl< let reader = std::io::Cursor::new(buf); self.import_blocks(reader, true) } - Ok(None) => Box::new(future::err("Unknown block".into())), - Err(e) => Box::new(future::err(format!("Error reading block: {:?}", e).into())), + Ok(None) => Box::pin(future::err("Unknown block".into())), + Err(e) => Box::pin(future::err(format!("Error reading block: {:?}", e).into())), } } } diff --git a/client/service/src/config.rs b/client/service/src/config.rs index 9aa7f795ddeca..994ef2e3723b9 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -73,7 +73,7 @@ pub struct Configuration { pub rpc_ws_max_connections: Option, /// CORS settings for HTTP & WS servers. `None` if all origins are allowed. pub rpc_cors: Option>, - /// Prometheus endpoint Port. `None` if disabled. + /// Prometheus exporter Port. `None` if disabled. pub prometheus_port: Option, /// Telemetry service URL. `None` if disabled. 
pub telemetry_endpoints: Option, diff --git a/client/service/src/error.rs b/client/service/src/error.rs index 6516b1c62c6d6..4cbd172e4b2b3 100644 --- a/client/service/src/error.rs +++ b/client/service/src/error.rs @@ -50,6 +50,12 @@ impl<'a> From<&'a str> for Error { } } +impl From for Error { + fn from(e: prometheus_exporter::PrometheusError) -> Self { + Error::Other(format!("Prometheus error: {}", e)) + } +} + impl std::error::Error for Error { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self { diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 7a3c6fc9eaab3..87327d0967583 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -32,15 +32,17 @@ use std::marker::PhantomData; use std::net::SocketAddr; use std::collections::HashMap; use std::time::{Duration, Instant}; -use futures::sync::mpsc; +use std::task::{Poll, Context}; use parking_lot::Mutex; use sc_client::Client; use exit_future::Signal; -use futures::prelude::*; -use futures03::{ - future::{ready, FutureExt as _, TryFutureExt as _}, - stream::{StreamExt as _, TryStreamExt as _}, +use futures::{ + Future, FutureExt, Stream, StreamExt, TryFutureExt, + future::select, channel::mpsc, + compat::*, + sink::SinkExt, + task::{Spawn, SpawnExt, FutureObj, SpawnError}, }; use sc_network::{ NetworkService, NetworkState, specialization::NetworkSpecialization, @@ -67,8 +69,6 @@ pub use sc_rpc::Metadata as RpcMetadata; pub use std::{ops::Deref, result::Result, sync::Arc}; #[doc(hidden)] pub use sc_network::{FinalityProofProvider, OnDemand, config::BoxFinalityProofRequestBuilder}; -#[doc(hidden)] -pub use futures::future::Executor; const DEFAULT_PROTOCOL_ID: &str = "sup"; @@ -92,13 +92,13 @@ pub struct Service { /// A receiver for spawned essential-tasks concluding. essential_failed_rx: mpsc::UnboundedReceiver<()>, /// Sender for futures that must be spawned as background tasks. 
- to_spawn_tx: mpsc::UnboundedSender + Send>>, + to_spawn_tx: mpsc::UnboundedSender + Send>>>, /// Receiver for futures that must be spawned as background tasks. - to_spawn_rx: mpsc::UnboundedReceiver + Send>>, + to_spawn_rx: mpsc::UnboundedReceiver + Send>>>, /// List of futures to poll from `poll`. /// If spawning a background task is not possible, we instead push the task into this `Vec`. /// The elements must then be polled manually. - to_poll: Vec + Send>>, + to_poll: Vec + Send>>>, rpc_handlers: sc_rpc_server::RpcHandler, _rpc: Box, _telemetry: Option, @@ -109,42 +109,36 @@ pub struct Service { } /// Alias for a an implementation of `futures::future::Executor`. -pub type TaskExecutor = Arc + Send>> + Send + Sync>; +pub type TaskExecutor = Arc; /// An handle for spawning tasks in the service. #[derive(Clone)] pub struct SpawnTaskHandle { - sender: mpsc::UnboundedSender + Send>>, + sender: mpsc::UnboundedSender + Send>>>, on_exit: exit_future::Exit, } -impl Executor + Send>> for SpawnTaskHandle { - fn execute( - &self, - future: Box + Send>, - ) -> Result<(), futures::future::ExecuteError + Send>>> { - let exit = self.on_exit.clone().map(Ok).compat(); - let future = Box::new(future.select(exit).then(|_| Ok(()))); - if let Err(err) = self.sender.unbounded_send(future) { - let kind = futures::future::ExecuteErrorKind::Shutdown; - Err(futures::future::ExecuteError::new(kind, err.into_inner())) - } else { - Ok(()) - } +impl Spawn for SpawnTaskHandle { + fn spawn_obj(&self, future: FutureObj<'static, ()>) + -> Result<(), SpawnError> { + let future = select(self.on_exit.clone(), future).map(drop); + self.sender.unbounded_send(Box::pin(future)) + .map_err(|_| SpawnError::shutdown()) } } -impl futures03::task::Spawn for SpawnTaskHandle { - fn spawn_obj(&self, future: futures03::task::FutureObj<'static, ()>) - -> Result<(), futures03::task::SpawnError> { - self.execute(Box::new(futures03::compat::Compat::new(future.unit_error()))) - .map_err(|_| 
futures03::task::SpawnError::shutdown()) +type Boxed01Future01 = Box + Send + 'static>; + +impl futures01::future::Executor for SpawnTaskHandle { + fn execute(&self, future: Boxed01Future01) -> Result<(), futures01::future::ExecuteError>{ + self.spawn(future.compat().map(drop)); + Ok(()) } } /// Abstraction over a Substrate service. -pub trait AbstractService: 'static + Future + - Executor + Send>> + Send { +pub trait AbstractService: 'static + Future> + + Spawn + Send + Unpin { /// Type of block of this chain. type Block: BlockT; /// Backend storage for the client. @@ -168,12 +162,12 @@ pub trait AbstractService: 'static + Future + fn telemetry(&self) -> Option; /// Spawns a task in the background that runs the future passed as parameter. - fn spawn_task(&self, task: impl Future + Send + 'static); + fn spawn_task(&self, task: impl Future + Send + Unpin + 'static); /// Spawns a task in the background that runs the future passed as /// parameter. The given task is considered essential, i.e. if it errors we /// trigger a service exit. - fn spawn_essential_task(&self, task: impl Future + Send + 'static); + fn spawn_essential_task(&self, task: impl Future + Send + Unpin + 'static); /// Returns a handle for spawning tasks. fn spawn_task_handle(&self) -> SpawnTaskHandle; @@ -190,7 +184,7 @@ pub trait AbstractService: 'static + Future + /// /// If the request subscribes you to events, the `Sender` in the `RpcSession` object is used to /// send back spontaneous events. - fn rpc_query(&self, mem: &RpcSession, request: &str) -> Box, Error = ()> + Send>; + fn rpc_query(&self, mem: &RpcSession, request: &str) -> Pin> + Send>>; /// Get shared client instance. 
fn client(&self) -> Arc>; @@ -216,11 +210,11 @@ impl AbstractService Service, TSc, NetworkStatus, NetworkService, TExPool, TOc> where - TBl: BlockT, + TBl: BlockT + Unpin, TBackend: 'static + sc_client_api::backend::Backend, TExec: 'static + sc_client::CallExecutor + Send + Sync + Clone, TRtApi: 'static + Send + Sync, - TSc: sp_consensus::SelectChain + 'static + Clone + Send, + TSc: sp_consensus::SelectChain + 'static + Clone + Send + Unpin, TExPool: 'static + TransactionPool + TransactionPoolMaintainer, TOc: 'static + Send + Sync, @@ -248,25 +242,22 @@ where self.keystore.clone() } - fn spawn_task(&self, task: impl Future + Send + 'static) { - let exit = self.on_exit().map(Ok).compat(); - let task = task.select(exit).then(|_| Ok(())); - let _ = self.to_spawn_tx.unbounded_send(Box::new(task)); + fn spawn_task(&self, task: impl Future + Send + Unpin + 'static) { + let task = select(self.on_exit(), task).map(drop); + let _ = self.to_spawn_tx.unbounded_send(Box::pin(task)); } - fn spawn_essential_task(&self, task: impl Future + Send + 'static) { - let essential_failed = self.essential_failed_tx.clone(); + fn spawn_essential_task(&self, task: impl Future + Send + Unpin + 'static) { + let mut essential_failed = self.essential_failed_tx.clone(); let essential_task = std::panic::AssertUnwindSafe(task) .catch_unwind() - .then(move |_| { + .map(move |_| { error!("Essential task failed. 
Shutting down service."); let _ = essential_failed.send(()); - Ok(()) }); - let exit = self.on_exit().map(Ok::<_, ()>).compat(); - let task = essential_task.select(exit).then(|_| Ok(())); + let task = select(self.on_exit(), essential_task).map(drop); - let _ = self.to_spawn_tx.unbounded_send(Box::new(task)); + let _ = self.to_spawn_tx.unbounded_send(Box::pin(task)); } fn spawn_task_handle(&self) -> SpawnTaskHandle { @@ -276,8 +267,12 @@ where } } - fn rpc_query(&self, mem: &RpcSession, request: &str) -> Box, Error = ()> + Send> { - Box::new(self.rpc_handlers.handle_request(request, mem.metadata.clone())) + fn rpc_query(&self, mem: &RpcSession, request: &str) -> Pin> + Send>> { + Box::pin( + self.rpc_handlers.handle_request(request, mem.metadata.clone()) + .compat() + .map(|res| res.expect("this should never fail")) + ) } fn client(&self) -> Arc> { @@ -309,57 +304,56 @@ where } } -impl Future for +impl Future for Service { - type Item = (); - type Error = Error; + type Output = Result<(), Error>; - fn poll(&mut self) -> Poll { - match self.essential_failed_rx.poll() { - Ok(Async::NotReady) => {}, - Ok(Async::Ready(_)) | Err(_) => { + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { + let this = Pin::into_inner(self); + + match Pin::new(&mut this.essential_failed_rx).poll_next(cx) { + Poll::Pending => {}, + Poll::Ready(_) => { // Ready(None) should not be possible since we hold a live // sender. 
- return Err(Error::Other("Essential task failed.".into())); + return Poll::Ready(Err(Error::Other("Essential task failed.".into()))); } } - while let Ok(Async::Ready(Some(task_to_spawn))) = self.to_spawn_rx.poll() { + while let Poll::Ready(Some(task_to_spawn)) = Pin::new(&mut this.to_spawn_rx).poll_next(cx) { + // TODO: Update to tokio 0.2 when libp2p get switched to std futures (#4383) let executor = tokio_executor::DefaultExecutor::current(); - if let Err(err) = executor.execute(task_to_spawn) { + use futures01::future::Executor; + if let Err(err) = executor.execute(task_to_spawn.unit_error().compat()) { debug!( target: "service", "Failed to spawn background task: {:?}; falling back to manual polling", err ); - self.to_poll.push(err.into_future()); + this.to_poll.push(Box::pin(err.into_future().compat().map(drop))); } } // Polling all the `to_poll` futures. - while let Some(pos) = self.to_poll.iter_mut().position(|t| t.poll().map(|t| t.is_ready()).unwrap_or(true)) { - let _ = self.to_poll.remove(pos); + while let Some(pos) = this.to_poll.iter_mut().position(|t| Pin::new(t).poll(cx).is_ready()) { + let _ = this.to_poll.remove(pos); } // The service future never ends. 
- Ok(Async::NotReady) + Poll::Pending } } -impl Executor + Send>> for +impl Spawn for Service { - fn execute( + fn spawn_obj( &self, - future: Box + Send> - ) -> Result<(), futures::future::ExecuteError + Send>>> { - if let Err(err) = self.to_spawn_tx.unbounded_send(future) { - let kind = futures::future::ExecuteErrorKind::Shutdown; - Err(futures::future::ExecuteError::new(kind, err.into_inner())) - } else { - Ok(()) - } + future: FutureObj<'static, ()> + ) -> Result<(), SpawnError> { + self.to_spawn_tx.unbounded_send(Box::pin(future)) + .map_err(|_| SpawnError::shutdown()) } } @@ -376,29 +370,23 @@ fn build_network_future< mut network: sc_network::NetworkWorker, client: Arc, status_sinks: Arc, NetworkState)>>>, - rpc_rx: futures03::channel::mpsc::UnboundedReceiver>, + mut rpc_rx: mpsc::UnboundedReceiver>, should_have_peers: bool, -) -> impl Future { - // Compatibility shim while we're transitioning to stable Futures. - // See https://github.com/paritytech/substrate/issues/3099 - let mut rpc_rx = futures03::compat::Compat::new(rpc_rx.map(|v| Ok::<_, ()>(v))); +) -> impl Future { + let mut imported_blocks_stream = client.import_notification_stream().fuse(); + let mut finality_notification_stream = client.finality_notification_stream().fuse(); - let mut imported_blocks_stream = client.import_notification_stream().fuse() - .map(|v| Ok::<_, ()>(v)).compat(); - let mut finality_notification_stream = client.finality_notification_stream().fuse() - .map(|v| Ok::<_, ()>(v)).compat(); - - futures::future::poll_fn(move || { + futures::future::poll_fn(move |cx| { let before_polling = Instant::now(); // We poll `imported_blocks_stream`. 
- while let Ok(Async::Ready(Some(notification))) = imported_blocks_stream.poll() { + while let Poll::Ready(Some(notification)) = Pin::new(&mut imported_blocks_stream).poll_next(cx) { network.on_block_imported(notification.hash, notification.header, Vec::new(), notification.is_new_best); } // We poll `finality_notification_stream`, but we only take the last event. let mut last = None; - while let Ok(Async::Ready(Some(item))) = finality_notification_stream.poll() { + while let Poll::Ready(Some(item)) = Pin::new(&mut finality_notification_stream).poll_next(cx) { last = Some(item); } if let Some(notification) = last { @@ -406,7 +394,7 @@ fn build_network_future< } // Poll the RPC requests and answer them. - while let Ok(Async::Ready(Some(request))) = rpc_rx.poll() { + while let Poll::Ready(Some(request)) = Pin::new(&mut rpc_rx).poll_next(cx) { match request { sc_rpc::system::Request::Health(sender) => { let _ = sender.send(sc_rpc::system::Health { @@ -466,7 +454,7 @@ fn build_network_future< } // Interval report for the external API. - status_sinks.lock().poll(|| { + status_sinks.lock().poll(cx, || { let status = NetworkStatus { sync_state: network.sync_state(), best_seen_block: network.best_seen_block(), @@ -481,12 +469,10 @@ fn build_network_future< }); // Main network polling. - let mut net_poll = futures03::future::poll_fn(|cx| futures03::future::Future::poll(Pin::new(&mut network), cx)) - .compat(); - if let Ok(Async::Ready(())) = net_poll.poll().map_err(|err| { + if let Poll::Ready(Ok(())) = Pin::new(&mut network).poll(cx).map_err(|err| { warn!(target: "service", "Error in network: {:?}", err); }) { - return Ok(Async::Ready(())); + return Poll::Ready(()); } // Now some diagnostic for performances. @@ -498,7 +484,7 @@ fn build_network_future< polling_dur ); - Ok(Async::NotReady) + Poll::Pending }) } @@ -596,7 +582,7 @@ impl RpcSession { /// messages. /// /// The `RpcSession` must be kept alive in order to receive messages on the sender. 
- pub fn new(sender: mpsc::Sender) -> RpcSession { + pub fn new(sender: futures01::sync::mpsc::Sender) -> RpcSession { RpcSession { metadata: sender.into(), } @@ -668,7 +654,7 @@ where let best_block_id = BlockId::hash(self.client.info().best_hash); let import_future = self.pool.submit_one(&best_block_id, uxt); let import_future = import_future - .then(move |import_result| { + .map(move |import_result| { match import_result { Ok(_) => report_handle.report_peer(who, reputation_change_good), Err(e) => match e.into_pool_error() { @@ -680,11 +666,9 @@ where Err(e) => debug!("Error converting pool error: {:?}", e), } } - ready(Ok(())) - }) - .compat(); + }); - if let Err(e) = self.executor.execute(Box::new(import_future)) { + if let Err(e) = self.executor.spawn(Box::new(import_future)) { warn!("Error scheduling extrinsic import: {:?}", e); } } @@ -700,7 +684,7 @@ where #[cfg(test)] mod tests { use super::*; - use futures03::executor::block_on; + use futures::executor::block_on; use sp_consensus::SelectChain; use sp_runtime::traits::BlindCheckable; use substrate_test_runtime_client::{prelude::*, runtime::{Extrinsic, Transfer}}; diff --git a/client/service/src/status_sinks.rs b/client/service/src/status_sinks.rs index 205a22d70f9f4..de5fe865736af 100644 --- a/client/service/src/status_sinks.rs +++ b/client/service/src/status_sinks.rs @@ -14,11 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use futures::prelude::*; -use futures::sync::mpsc; -use futures::stream::futures_unordered::FuturesUnordered; -use std::time::{Duration, Instant}; -use tokio_timer::Delay; +use futures::{Stream, stream::futures_unordered::FuturesUnordered, channel::mpsc}; +use std::time::Duration; +use std::pin::Pin; +use std::task::{Poll, Context}; +use futures_timer::Delay; /// Holds a list of `UnboundedSender`s, each associated with a certain time period. Every time the /// period elapses, we push an element on the sender. 
@@ -29,7 +29,7 @@ pub struct StatusSinks { } struct YieldAfter { - delay: tokio_timer::Delay, + delay: Delay, interval: Duration, sender: Option>, } @@ -47,7 +47,7 @@ impl StatusSinks { /// The `interval` is the time period between two pushes on the sender. pub fn push(&mut self, interval: Duration, sender: mpsc::UnboundedSender) { self.entries.push(YieldAfter { - delay: Delay::new(Instant::now() + interval), + delay: Delay::new(interval), interval, sender: Some(sender), }) @@ -57,16 +57,16 @@ impl StatusSinks { /// pushes what it returns to the sender. /// /// This function doesn't return anything, but it should be treated as if it implicitly - /// returns `Ok(Async::NotReady)`. In particular, it should be called again when the task + /// returns `Poll::Pending`. In particular, it should be called again when the task /// is waken up. /// /// # Panic /// /// Panics if not called within the context of a task. - pub fn poll(&mut self, mut status_grab: impl FnMut() -> T) { + pub fn poll(&mut self, cx: &mut Context, mut status_grab: impl FnMut() -> T) { loop { - match self.entries.poll() { - Ok(Async::Ready(Some((sender, interval)))) => { + match Pin::new(&mut self.entries).poll_next(cx) { + Poll::Ready(Some((sender, interval))) => { let status = status_grab(); if sender.unbounded_send(status).is_ok() { self.entries.push(YieldAfter { @@ -74,33 +74,32 @@ impl StatusSinks { // waken up and the moment it is polled, the period is actually not // `interval` but `interval + `. We ignore this problem in // practice. 
- delay: Delay::new(Instant::now() + interval), + delay: Delay::new(interval), interval, sender: Some(sender), }); } } - Err(()) | - Ok(Async::Ready(None)) | - Ok(Async::NotReady) => break, + Poll::Ready(None) | + Poll::Pending => break, } } } } -impl Future for YieldAfter { - type Item = (mpsc::UnboundedSender, Duration); - type Error = (); +impl futures::Future for YieldAfter { + type Output = (mpsc::UnboundedSender, Duration); - fn poll(&mut self) -> Poll { - match self.delay.poll() { - Ok(Async::NotReady) => Ok(Async::NotReady), - Ok(Async::Ready(())) => { - let sender = self.sender.take() + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { + let this = Pin::into_inner(self); + + match Pin::new(&mut this.delay).poll(cx) { + Poll::Pending => Poll::Pending, + Poll::Ready(()) => { + let sender = this.sender.take() .expect("sender is always Some unless the future is finished; qed"); - Ok(Async::Ready((sender, self.interval))) - }, - Err(_) => Err(()), + Poll::Ready((sender, this.interval)) + } } } } @@ -109,8 +108,9 @@ impl Future for YieldAfter { mod tests { use super::StatusSinks; use futures::prelude::*; - use futures::sync::mpsc; + use futures::channel::mpsc; use std::time::Duration; + use std::task::Poll; #[test] fn works() { @@ -125,18 +125,18 @@ mod tests { let mut runtime = tokio::runtime::Runtime::new().unwrap(); let mut val_order = 5; - runtime.spawn(futures::future::poll_fn(move || { - status_sinks.poll(|| { val_order += 1; val_order }); - Ok(Async::NotReady) + runtime.spawn(futures::future::poll_fn(move |cx| { + status_sinks.poll(cx, || { val_order += 1; val_order }); + Poll::<()>::Pending })); let done = rx .into_future() - .and_then(|(item, rest)| { + .then(|(item, rest)| { assert_eq!(item, Some(6)); rest.into_future() }) - .and_then(|(item, rest)| { + .then(|(item, rest)| { assert_eq!(item, Some(7)); rest.into_future() }) @@ -144,6 +144,6 @@ mod tests { assert_eq!(item, Some(8)); }); - runtime.block_on(done).unwrap(); + 
runtime.block_on(done); } } diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index 2789bfda0fe19..01ccf71a4bd7b 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -7,15 +7,15 @@ edition = "2018" [dependencies] tempfile = "3.1.0" tokio = "0.1.22" -futures = "0.1.29" +futures01 = { package = "futures", version = "0.1.29" } log = "0.4.8" env_logger = "0.7.0" fdlimit = "0.1.1" -futures03 = { package = "futures", version = "0.3.1", features = ["compat"] } -sc-service = { version = "2.0.0", default-features = false, path = "../../service" } +futures = { version = "0.3.1", features = ["compat"] } +sc-service = { version = "0.8.0", default-features = false, path = "../../service" } sc-network = { version = "0.8", path = "../../network" } sp-consensus = { version = "0.8", path = "../../../primitives/consensus/common" } -sc-client = { version = "2.0.0", path = "../../" } +sc-client = { version = "0.8", path = "../../" } sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } sp-core = { version = "2.0.0", path = "../../../primitives/core" } sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index c8dead0845c65..c9a01b2b2f2c8 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -21,7 +21,7 @@ use std::sync::{Arc, Mutex, MutexGuard}; use std::net::Ipv4Addr; use std::time::Duration; use log::info; -use futures::{Future, Stream, Poll}; +use futures01::{Future, Stream, Poll}; use tempfile::TempDir; use tokio::{runtime::Runtime, prelude::FutureExt}; use tokio::timer::Interval; @@ -72,12 +72,13 @@ impl From for SyncService { } } -impl> Future for SyncService { +impl> + Unpin> Future for SyncService { type Item = (); type Error = sc_service::Error; fn poll(&mut self) -> Poll { - self.0.lock().unwrap().poll() + let mut f = self.0.lock().unwrap(); + 
futures::compat::Compat::new(&mut *f).poll() } } @@ -458,7 +459,7 @@ pub fn sync( let first_user_data = &network.full_nodes[0].2; let best_block = BlockId::number(first_service.get().client().chain_info().best_number); let extrinsic = extrinsic_factory(&first_service.get(), first_user_data); - futures03::executor::block_on(first_service.get().transaction_pool().submit_one(&best_block, extrinsic)).unwrap(); + futures::executor::block_on(first_service.get().transaction_pool().submit_one(&best_block, extrinsic)).unwrap(); network.run_until_all_full( |_index, service| service.get().transaction_pool().ready().count() == 1, |_index, _service| true, diff --git a/client/src/call_executor.rs b/client/src/call_executor.rs index 6a41eba0108c0..6c685fc1b82c7 100644 --- a/client/src/call_executor.rs +++ b/client/src/call_executor.rs @@ -17,7 +17,7 @@ use std::{sync::Arc, panic::UnwindSafe, result, cell::RefCell}; use codec::{Encode, Decode}; use sp_runtime::{ - generic::BlockId, traits::{Block as BlockT, HasherFor}, + generic::BlockId, traits::{Block as BlockT, HasherFor, NumberFor}, }; use sp_state_machine::{ self, OverlayedChanges, Ext, ExecutionManager, StateMachine, ExecutionStrategy, @@ -61,7 +61,7 @@ impl Clone for LocalCallExecutor where E: Clone { impl CallExecutor for LocalCallExecutor where B: backend::Backend, - E: CodeExecutor + RuntimeInfo, + E: CodeExecutor + RuntimeInfo + Clone + 'static, Block: BlockT, { type Error = E::Error; @@ -80,7 +80,7 @@ where let state = self.backend.state_at(*id)?; let return_data = StateMachine::new( &state, - self.backend.changes_trie_storage(), + backend::changes_tries_state_at_block(id, self.backend.changes_trie_storage())?, &mut changes, &self.executor, method, @@ -132,6 +132,7 @@ where } let mut state = self.backend.state_at(*at)?; + let changes_trie_state = backend::changes_tries_state_at_block(at, self.backend.changes_trie_storage())?; let mut storage_transaction_cache = storage_transaction_cache.map(|c| c.borrow_mut()); @@ 
-150,7 +151,7 @@ where StateMachine::new( &backend, - self.backend.changes_trie_storage(), + changes_trie_state, &mut *changes.borrow_mut(), &self.executor, method, @@ -163,7 +164,7 @@ where } None => StateMachine::new( &state, - self.backend.changes_trie_storage(), + changes_trie_state, &mut *changes.borrow_mut(), &self.executor, method, @@ -183,13 +184,13 @@ where fn runtime_version(&self, id: &BlockId) -> sp_blockchain::Result { let mut overlay = OverlayedChanges::default(); let state = self.backend.state_at(*id)?; + let changes_trie_state = backend::changes_tries_state_at_block(id, self.backend.changes_trie_storage())?; let mut cache = StorageTransactionCache::::default(); - let mut ext = Ext::new( &mut overlay, &mut cache, &state, - self.backend.changes_trie_storage(), + changes_trie_state, None, ); let version = self.executor.runtime_version(&mut ext); @@ -207,7 +208,7 @@ where method: &str, call_data: &[u8] ) -> Result<(Vec, StorageProof), sp_blockchain::Error> { - sp_state_machine::prove_execution_on_trie_backend( + sp_state_machine::prove_execution_on_trie_backend::<_, _, NumberFor, _>( trie_state, overlay, &self.executor, @@ -225,7 +226,7 @@ where impl sp_version::GetRuntimeVersion for LocalCallExecutor where B: backend::Backend, - E: CodeExecutor + RuntimeInfo, + E: CodeExecutor + RuntimeInfo + Clone + 'static, Block: BlockT, { fn native_version(&self) -> &sp_version::NativeVersion { diff --git a/client/src/client.rs b/client/src/client.rs index 2babf6f3e900e..c74a005c6fea6 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -66,6 +66,7 @@ pub use sc_client_api::{ backend::{ self, BlockImportOperation, PrunableStateChangesTrieStorage, ClientImportOperation, Finalizer, ImportSummary, NewBlockState, + changes_tries_state_at_block, }, client::{ ImportNotifications, FinalityNotification, FinalityNotifications, BlockImportNotification, @@ -129,7 +130,7 @@ impl PrePostHeader { /// Create an instance of in-memory client. 
pub fn new_in_mem( executor: E, - genesis_storage: S, + genesis_storage: &S, keystore: Option, ) -> sp_blockchain::Result, @@ -149,14 +150,14 @@ pub fn new_in_mem( pub fn new_with_backend( backend: Arc, executor: E, - build_genesis_storage: S, + build_genesis_storage: &S, keystore: Option, ) -> sp_blockchain::Result, Block, RA>> where E: CodeExecutor + RuntimeInfo, S: BuildStorage, Block: BlockT, - B: backend::LocalBackend + B: backend::LocalBackend + 'static, { let call_executor = LocalCallExecutor::new(backend.clone(), executor); let extensions = ExecutionExtensions::new(Default::default(), keystore); @@ -187,7 +188,7 @@ impl Client where pub fn new( backend: Arc, executor: E, - build_genesis_storage: S, + build_genesis_storage: &S, fork_blocks: ForkBlocks, bad_blocks: BadBlocks, execution_extensions: ExecutionExtensions, @@ -407,18 +408,26 @@ impl Client where first: NumberFor, last: BlockId, ) -> sp_blockchain::Result, BlockId)>> { - let (config, storage) = match self.require_changes_trie().ok() { - Some((config, storage)) => (config, storage), + let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?; + let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?; + if first > last_number { + return Err(sp_blockchain::Error::ChangesTrieAccessFailed("Invalid changes trie range".into())); + } + + let (storage, configs) = match self.require_changes_trie(first, last_hash, false).ok() { + Some((storage, configs)) => (storage, configs), None => return Ok(None), }; - let last_num = self.backend.blockchain().expect_block_number_from_id(&last)?; - if first > last_num { - return Err(sp_blockchain::Error::ChangesTrieAccessFailed("Invalid changes trie range".into())); + + let first_available_changes_trie = configs.last().map(|config| config.0); + match first_available_changes_trie { + Some(first_available_changes_trie) => { + let oldest_unpruned = storage.oldest_pruned_digest_range_end(); + let first = 
std::cmp::max(first_available_changes_trie, oldest_unpruned); + Ok(Some((first, last))) + }, + None => Ok(None) } - let finalized_number = self.backend.blockchain().info().finalized_number; - let oldest = storage.oldest_changes_trie_block(&config, finalized_number); - let first = ::std::cmp::max(first, oldest); - Ok(Some((first, last))) } /// Get pairs of (block, extrinsic) where key has been changed at given blocks range. @@ -432,30 +441,42 @@ impl Client where storage_key: Option<&StorageKey>, key: &StorageKey ) -> sp_blockchain::Result, u32)>> { - let (config, storage) = self.require_changes_trie()?; let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?; let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?; + let (storage, configs) = self.require_changes_trie(first, last_hash, true)?; + + let mut result = Vec::new(); + let best_number = self.backend.blockchain().info().best_number; + for (config_zero, config_end, config) in configs { + let range_first = ::std::cmp::max(first, config_zero + One::one()); + let range_anchor = match config_end { + Some((config_end_number, config_end_hash)) => if last_number > config_end_number { + ChangesTrieAnchorBlockId { hash: config_end_hash, number: config_end_number } + } else { + ChangesTrieAnchorBlockId { hash: convert_hash(&last_hash), number: last_number } + }, + None => ChangesTrieAnchorBlockId { hash: convert_hash(&last_hash), number: last_number }, + }; - // FIXME: remove this in https://github.com/paritytech/substrate/pull/3201 - let config_range = ChangesTrieConfigurationRange { - config: &config, - zero: Zero::zero(), - end: None, - }; + let config_range = ChangesTrieConfigurationRange { + config: &config, + zero: config_zero.clone(), + end: config_end.map(|(config_end_number, _)| config_end_number), + }; + let result_range: Vec<(NumberFor, u32)> = key_changes::, _>( + config_range, + storage.storage(), + range_first, + &range_anchor, + best_number, + 
storage_key.as_ref().map(|x| &x.0[..]), + &key.0) + .and_then(|r| r.map(|r| r.map(|(block, tx)| (block, tx))).collect::>()) + .map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?; + result.extend(result_range); + } - key_changes::, _>( - config_range, - &*storage, - first, - &ChangesTrieAnchorBlockId { - hash: convert_hash(&last_hash), - number: last_number, - }, - self.backend.blockchain().info().best_number, - storage_key.as_ref().map(|sk| sk.0.as_slice()), - &key.0) - .and_then(|r| r.map(|r| r.map(|(block, tx)| (block, tx))).collect::>()) - .map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err)) + Ok(result) } /// Get proof for computation of (block, extrinsic) pairs where key has been changed at given blocks range. @@ -550,11 +571,13 @@ impl Client where } } - let (config, storage) = self.require_changes_trie()?; + let first_number = self.backend.blockchain() + .expect_block_number_from_id(&BlockId::Hash(first))?; + let (storage, configs) = self.require_changes_trie(first_number, last, true)?; let min_number = self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(min))?; let recording_storage = AccessedRootsRecorder:: { - storage, + storage: storage.storage(), min: min_number, required_roots_proofs: Mutex::new(BTreeMap::new()), }; @@ -564,31 +587,31 @@ impl Client where self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(max))?, ); - // FIXME: remove this in https://github.com/paritytech/substrate/pull/3201 - let config_range = ChangesTrieConfigurationRange { - config: &config, - zero: Zero::zero(), - end: None, - }; - // fetch key changes proof - let first_number = self.backend.blockchain() - .expect_block_number_from_id(&BlockId::Hash(first))?; - let last_number = self.backend.blockchain() - .expect_block_number_from_id(&BlockId::Hash(last))?; - let key_changes_proof = key_changes_proof::, _>( - config_range, - &recording_storage, - first_number, - &ChangesTrieAnchorBlockId { - hash: 
convert_hash(&last), - number: last_number, - }, - max_number, - storage_key.as_ref().map(|sk| sk.0.as_slice()), - &key.0, - ) - .map_err(|err| sp_blockchain::Error::from(sp_blockchain::Error::ChangesTrieAccessFailed(err)))?; + let mut proof = Vec::new(); + for (config_zero, config_end, config) in configs { + let last_number = self.backend.blockchain() + .expect_block_number_from_id(&BlockId::Hash(last))?; + let config_range = ChangesTrieConfigurationRange { + config: &config, + zero: config_zero, + end: config_end.map(|(config_end_number, _)| config_end_number), + }; + let proof_range = key_changes_proof::, _>( + config_range, + &recording_storage, + first_number, + &ChangesTrieAnchorBlockId { + hash: convert_hash(&last), + number: last_number, + }, + max_number, + storage_key.as_ref().map(|x| &x.0[..]), + &key.0, + ) + .map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?; + proof.extend(proof_range); + } // now gather proofs for all changes tries roots that were touched during key_changes_proof // execution AND are unknown (i.e. replaced with CHT) to the requester @@ -597,7 +620,7 @@ impl Client where Ok(ChangesProof { max_block: max_number, - proof: key_changes_proof, + proof, roots: roots.into_iter().map(|(n, h)| (n, convert_hash(&h))).collect(), roots_proof, }) @@ -650,14 +673,45 @@ impl Client where Ok(proof) } - /// Returns changes trie configuration and storage or an error if it is not supported. - fn require_changes_trie(&self) -> sp_blockchain::Result<(ChangesTrieConfiguration, &B::ChangesTrieStorage)> { - let config = self.changes_trie_config()?; - let storage = self.backend.changes_trie_storage(); - match (config, storage) { - (Some(config), Some(storage)) => Ok((config, storage)), - _ => Err(sp_blockchain::Error::ChangesTriesNotSupported.into()), + /// Returns changes trie storage and all configurations that have been active in the range [first; last]. 
+ /// + /// Configurations are returned in descending order (and obviously never overlap). + /// If fail_if_disabled is false, returns maximal consequent configurations ranges, starting from last and + /// stopping on either first, or when CT have been disabled. + /// If fail_if_disabled is true, fails when there's a subrange where CT have been disabled + /// inside first..last blocks range. + fn require_changes_trie( + &self, + first: NumberFor, + last: Block::Hash, + fail_if_disabled: bool, + ) -> sp_blockchain::Result<( + &dyn PrunableStateChangesTrieStorage, + Vec<(NumberFor, Option<(NumberFor, Block::Hash)>, ChangesTrieConfiguration)>, + )> { + let storage = match self.backend.changes_trie_storage() { + Some(storage) => storage, + None => return Err(sp_blockchain::Error::ChangesTriesNotSupported), + }; + + let mut configs = Vec::with_capacity(1); + let mut current = last; + loop { + let config_range = storage.configuration_at(&BlockId::Hash(current))?; + match config_range.config { + Some(config) => configs.push((config_range.zero.0, config_range.end, config)), + None if !fail_if_disabled => return Ok((storage, configs)), + None => return Err(sp_blockchain::Error::ChangesTriesNotSupported), + } + + if config_range.zero.0 < first { + break; + } + + current = *self.backend.blockchain().expect_header(BlockId::Hash(config_range.zero.1))?.parent_hash(); } + + Ok((storage, configs)) } /// Create a new block, built on the head of the chain. 
@@ -750,7 +804,6 @@ impl Client where import_block: BlockImportParams>, new_cache: HashMap>, ) -> sp_blockchain::Result where - E: CallExecutor + Send + Sync + Clone, Self: ProvideRuntimeApi, >::Api: CoreApi + ApiExt, @@ -829,7 +882,6 @@ impl Client where fork_choice: ForkChoiceStrategy, import_existing: bool, ) -> sp_blockchain::Result where - E: CallExecutor + Send + Sync + Clone, Self: ProvideRuntimeApi, >::Api: CoreApi + ApiExt, @@ -990,10 +1042,14 @@ impl Client where )?; let state = self.backend.state_at(at)?; + let changes_trie_state = changes_tries_state_at_block( + &at, + self.backend.changes_trie_storage(), + )?; let gen_storage_changes = runtime_api.into_storage_changes( &state, - self.backend.changes_trie_storage(), + changes_trie_state.as_ref(), *parent_hash, ); @@ -1251,13 +1307,6 @@ impl Client where Ok(uncles) } - fn changes_trie_config(&self) -> Result, Error> { - Ok(self.backend.state_at(BlockId::Number(self.backend.blockchain().info().best_number))? - .storage(well_known_keys::CHANGES_TRIE_CONFIG) - .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? - .and_then(|c| Decode::decode(&mut &*c).ok())) - } - /// Prepare in-memory header that is used in execution environment. fn prepare_environment_block(&self, parent: &BlockId) -> sp_blockchain::Result { let parent_header = self.backend.blockchain().expect_header(*parent)?; @@ -1387,7 +1436,7 @@ impl ProvideCache for Client where impl ProvideRuntimeApi for Client where B: backend::Backend, - E: CallExecutor + Clone + Send + Sync, + E: CallExecutor + Send + Sync, Block: BlockT, RA: ConstructRuntimeApi, { @@ -1400,7 +1449,7 @@ impl ProvideRuntimeApi for Client where impl CallApiAt for Client where B: backend::Backend, - E: CallExecutor + Clone + Send + Sync, + E: CallExecutor + Send + Sync, Block: BlockT, { type Error = Error; @@ -1448,7 +1497,7 @@ impl CallApiAt for Client where /// important verification work. 
impl sp_consensus::BlockImport for &Client where B: backend::Backend, - E: CallExecutor + Clone + Send + Sync, + E: CallExecutor + Send + Sync, Block: BlockT, Client: ProvideRuntimeApi, as ProvideRuntimeApi>::Api: CoreApi + @@ -1552,7 +1601,7 @@ impl sp_consensus::BlockImport for &Client sp_consensus::BlockImport for Client where B: backend::Backend, - E: CallExecutor + Clone + Send + Sync, + E: CallExecutor + Send + Sync, Block: BlockT, Self: ProvideRuntimeApi, >::Api: CoreApi + @@ -1873,7 +1922,8 @@ pub(crate) mod tests { // prepare client ang import blocks let mut local_roots = Vec::new(); - let mut remote_client = TestClientBuilder::new().set_support_changes_trie(true).build(); + let config = Some(ChangesTrieConfiguration::new(4, 2)); + let mut remote_client = TestClientBuilder::new().changes_trie_config(config).build(); let mut nonces: HashMap<_, u64> = Default::default(); for (i, block_transfers) in blocks_transfers.into_iter().enumerate() { let mut builder = remote_client.new_block(Default::default()).unwrap(); @@ -2909,13 +2959,15 @@ pub(crate) mod tests { .unwrap().build().unwrap().block; client.import(BlockOrigin::Own, b2.clone()).unwrap(); + // prepare B3 before we finalize A2, because otherwise we won't be able to + // read changes trie configuration after A2 is finalized + let b3 = client.new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap().build().unwrap().block; + // we will finalize A2 which should make it impossible to import a new // B3 at the same height but that doesnt't include it ClientExt::finalize_block(&client, BlockId::Hash(a2.hash()), None).unwrap(); - let b3 = client.new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) - .unwrap().build().unwrap().block; - let import_err = client.import(BlockOrigin::Own, b3).err().unwrap(); let expected_err = ConsensusError::ClientImport( sp_blockchain::Error::NotInFinalizedChain.to_string() @@ -3052,4 +3104,104 @@ pub(crate) mod tests { check_block_b1.parent_hash 
= H256::random(); assert_eq!(client.check_block(check_block_b1.clone()).unwrap(), ImportResult::UnknownParent); } + + #[test] + fn imports_blocks_with_changes_tries_config_change() { + // create client with initial 4^2 configuration + let mut client = TestClientBuilder::with_default_backend() + .changes_trie_config(Some(ChangesTrieConfiguration { + digest_interval: 4, + digest_levels: 2, + })).build(); + + // =================================================================== + // blocks 1,2,3,4,5,6,7,8,9,10 are empty + // block 11 changes the key + // block 12 is the L1 digest that covers this change + // blocks 13,14,15,16,17,18,19,20,21,22 are empty + // block 23 changes the configuration to 5^1 AND is skewed digest + // =================================================================== + // blocks 24,25 are changing the key + // block 26 is empty + // block 27 changes the key + // block 28 is the L1 digest (NOT SKEWED!!!) that covers changes AND changes configuration to 3^1 + // =================================================================== + // block 29 is empty + // block 30 changes the key + // block 31 is L1 digest that covers this change + // =================================================================== + (1..11).for_each(|number| { + let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (11..12).for_each(|number| { + let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); + block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); + let block = block.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (12..23).for_each(|number| { + let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + 
(23..24).for_each(|number| { + let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); + block.push_changes_trie_configuration_update(Some(ChangesTrieConfiguration { + digest_interval: 5, + digest_levels: 1, + })).unwrap(); + let block = block.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (24..26).for_each(|number| { + let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); + block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); + let block = block.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (26..27).for_each(|number| { + let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (27..28).for_each(|number| { + let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); + block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); + let block = block.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (28..29).for_each(|number| { + let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); + block.push_changes_trie_configuration_update(Some(ChangesTrieConfiguration { + digest_interval: 3, + digest_levels: 1, + })).unwrap(); + let block = block.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (29..30).for_each(|number| { + let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (30..31).for_each(|number| { + let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); + block.push_storage_change(vec![42], 
Some(number.to_le_bytes().to_vec())).unwrap(); + let block = block.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (31..32).for_each(|number| { + let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + + // now check that configuration cache works + assert_eq!( + client.key_changes(1, BlockId::Number(31), None, &StorageKey(vec![42])).unwrap(), + vec![(30, 0), (27, 0), (25, 0), (24, 0), (11, 0)] + ); + } } diff --git a/client/src/genesis.rs b/client/src/genesis.rs index ab89bdd4d5e13..fccdd71817e1f 100644 --- a/client/src/genesis.rs +++ b/client/src/genesis.rs @@ -45,7 +45,7 @@ mod tests { use codec::{Encode, Decode, Joiner}; use sc_executor::native_executor_instance; use sp_state_machine::{ - StateMachine, OverlayedChanges, ExecutionStrategy, InMemoryChangesTrieStorage, + StateMachine, OverlayedChanges, ExecutionStrategy, InMemoryBackend, }; use substrate_test_runtime_client::{ @@ -59,7 +59,7 @@ mod tests { native_executor_instance!( Executor, substrate_test_runtime_client::runtime::api::dispatch, - substrate_test_runtime_client::runtime::native_version + substrate_test_runtime_client::runtime::native_version, ); fn executor() -> sc_executor::NativeExecutor { @@ -92,7 +92,7 @@ mod tests { StateMachine::new( backend, - Some(&InMemoryChangesTrieStorage::<_, u64>::new()), + sp_state_machine::disabled_changes_trie_state::<_, u64>(), &mut overlay, &executor(), "Core_initialize_block", @@ -105,7 +105,7 @@ mod tests { for tx in transactions.iter() { StateMachine::new( backend, - Some(&InMemoryChangesTrieStorage::<_, u64>::new()), + sp_state_machine::disabled_changes_trie_state::<_, u64>(), &mut overlay, &executor(), "BlockBuilder_apply_extrinsic", @@ -118,7 +118,7 @@ mod tests { let ret_data = StateMachine::new( backend, - Some(&InMemoryChangesTrieStorage::<_, u64>::new()), + 
sp_state_machine::disabled_changes_trie_state::<_, u64>(), &mut overlay, &executor(), "BlockBuilder_finalize_block", @@ -150,7 +150,7 @@ mod tests { #[test] fn construct_genesis_should_work_with_native() { let mut storage = GenesisConfig::new( - false, + None, vec![Sr25519Keyring::One.public().into(), Sr25519Keyring::Two.public().into()], vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], 1000, @@ -165,7 +165,7 @@ mod tests { let mut overlay = OverlayedChanges::default(); let _ = StateMachine::new( &backend, - Some(&InMemoryChangesTrieStorage::<_, u64>::new()), + sp_state_machine::disabled_changes_trie_state::<_, u64>(), &mut overlay, &executor(), "Core_execute_block", @@ -178,7 +178,7 @@ mod tests { #[test] fn construct_genesis_should_work_with_wasm() { - let mut storage = GenesisConfig::new(false, + let mut storage = GenesisConfig::new(None, vec![Sr25519Keyring::One.public().into(), Sr25519Keyring::Two.public().into()], vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], 1000, @@ -193,7 +193,7 @@ mod tests { let mut overlay = OverlayedChanges::default(); let _ = StateMachine::new( &backend, - Some(&InMemoryChangesTrieStorage::<_, u64>::new()), + sp_state_machine::disabled_changes_trie_state::<_, u64>(), &mut overlay, &executor(), "Core_execute_block", @@ -206,7 +206,7 @@ mod tests { #[test] fn construct_genesis_with_bad_transaction_should_panic() { - let mut storage = GenesisConfig::new(false, + let mut storage = GenesisConfig::new(None, vec![Sr25519Keyring::One.public().into(), Sr25519Keyring::Two.public().into()], vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], 68, @@ -221,7 +221,7 @@ mod tests { let mut overlay = OverlayedChanges::default(); let r = StateMachine::new( &backend, - Some(&InMemoryChangesTrieStorage::<_, u64>::new()), + sp_state_machine::disabled_changes_trie_state::<_, u64>(), &mut overlay, &executor(), "Core_execute_block", diff --git a/client/src/in_mem.rs b/client/src/in_mem.rs index 
5d0d14c1cb1d7..dcff8102aeb6d 100644 --- a/client/src/in_mem.rs +++ b/client/src/in_mem.rs @@ -16,26 +16,24 @@ //! In memory client backend -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use std::sync::Arc; use parking_lot::RwLock; -use sp_core::{ChangesTrieConfiguration, storage::well_known_keys}; +use sp_core::storage::well_known_keys; use sp_core::offchain::storage::{ InMemOffchainStorage as OffchainStorage }; -use sp_runtime::generic::{BlockId, DigestItem}; +use sp_runtime::generic::BlockId; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Zero, NumberFor, HasherFor}; use sp_runtime::{Justification, Storage}; use sp_state_machine::{ - InMemoryChangesTrieStorage, ChangesTrieAnchorBlockId, ChangesTrieTransaction, - InMemoryBackend, Backend as StateBackend, + ChangesTrieTransaction, InMemoryBackend, Backend as StateBackend, StorageCollection, + ChildStorageCollection, }; -use hash_db::Prefix; -use sp_trie::MemoryDB; use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata}; use sc_client_api::{ - backend::{self, NewBlockState, StorageCollection, ChildStorageCollection}, + backend::{self, NewBlockState}, blockchain::{ self, BlockStatus, HeaderBackend, well_known_cache_keys::Id as CacheKeyId }, @@ -466,7 +464,6 @@ pub struct BlockImportOperation { pending_cache: HashMap>, old_state: InMemoryBackend>, new_state: Option>>, - changes_trie_update: Option>>, aux: Vec<(Vec, Option>)>, finalized_blocks: Vec<(BlockId, Option)>, set_head: Option>, @@ -510,9 +507,8 @@ impl backend::BlockImportOperation for BlockImportOperatio fn update_changes_trie( &mut self, - update: ChangesTrieTransaction, NumberFor>, + _update: ChangesTrieTransaction, NumberFor>, ) -> sp_blockchain::Result<()> { - self.changes_trie_update = Some(update.0); Ok(()) } @@ -569,7 +565,6 @@ impl backend::BlockImportOperation for BlockImportOperatio /// > struct for testing purposes. Do **NOT** use in production. 
pub struct Backend where Block::Hash: Ord { states: RwLock>>>, - changes_trie_storage: ChangesTrieStorage, blockchain: Blockchain, import_lock: RwLock<()>, } @@ -579,7 +574,6 @@ impl Backend where Block::Hash: Ord { pub fn new() -> Self { Backend { states: RwLock::new(HashMap::new()), - changes_trie_storage: ChangesTrieStorage(InMemoryChangesTrieStorage::new()), blockchain: Blockchain::new(), import_lock: Default::default(), } @@ -606,7 +600,6 @@ impl backend::Backend for Backend where Block::Hash type BlockImportOperation = BlockImportOperation; type Blockchain = Blockchain; type State = InMemoryBackend>; - type ChangesTrieStorage = ChangesTrieStorage; type OffchainStorage = OffchainStorage; fn begin_operation(&self) -> sp_blockchain::Result { @@ -616,7 +609,6 @@ impl backend::Backend for Backend where Block::Hash pending_cache: Default::default(), old_state, new_state: None, - changes_trie_update: None, aux: Default::default(), finalized_blocks: Default::default(), set_head: None, @@ -647,17 +639,6 @@ impl backend::Backend for Backend where Block::Hash self.states.write().insert(hash, operation.new_state.unwrap_or_else(|| old_state.clone())); - let maybe_changes_trie_root = header.digest().log(DigestItem::as_changes_trie_root).cloned(); - if let Some(changes_trie_root) = maybe_changes_trie_root { - if let Some(changes_trie_update) = operation.changes_trie_update { - self.changes_trie_storage.0.insert( - *header.number(), - changes_trie_root, - changes_trie_update - ); - } - } - self.blockchain.insert(hash, header, justification, body, pending_block.state)?; } @@ -688,8 +669,8 @@ impl backend::Backend for Backend where Block::Hash None } - fn changes_trie_storage(&self) -> Option<&Self::ChangesTrieStorage> { - Some(&self.changes_trie_storage) + fn changes_trie_storage(&self) -> Option<&dyn backend::PrunableStateChangesTrieStorage> { + None } fn offchain_storage(&self) -> Option { @@ -737,66 +718,6 @@ impl backend::RemoteBackend for Backend where Block } } -/// 
Prunable in-memory changes trie storage. -pub struct ChangesTrieStorage( - InMemoryChangesTrieStorage, NumberFor> -); - -impl backend::PrunableStateChangesTrieStorage for ChangesTrieStorage { - fn oldest_changes_trie_block( - &self, - _config: &ChangesTrieConfiguration, - _best_finalized: NumberFor, - ) -> NumberFor { - Zero::zero() - } -} - -impl sp_state_machine::ChangesTrieRootsStorage, NumberFor> for - ChangesTrieStorage -{ - fn build_anchor( - &self, - _hash: Block::Hash, - ) -> Result>, String> { - Err("Dummy implementation".into()) - } - - fn root( - &self, - _anchor: &ChangesTrieAnchorBlockId>, - _block: NumberFor, - ) -> Result, String> { - Err("Dummy implementation".into()) - } -} - -impl sp_state_machine::ChangesTrieStorage, NumberFor> for - ChangesTrieStorage -{ - fn as_roots_storage(&self) - -> &dyn sp_state_machine::ChangesTrieRootsStorage, NumberFor> - { - self - } - - fn with_cached_changed_keys( - &self, - _root: &Block::Hash, - _functor: &mut dyn FnMut(&HashMap>, HashSet>>), - ) -> bool { - false - } - - fn get( - &self, - key: &Block::Hash, - prefix: Prefix, - ) -> Result, String> { - self.0.get(key, prefix) - } -} - /// Check that genesis storage is valid. pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> { if storage.top.iter().any(|(k, _)| well_known_keys::is_child_storage_key(k)) { diff --git a/client/src/lib.rs b/client/src/lib.rs index cfaf08ca7ef6e..db2d4785a2c98 100644 --- a/client/src/lib.rs +++ b/client/src/lib.rs @@ -65,7 +65,7 @@ //! NativeExecutor::::new(WasmExecutionMethod::Interpreted, None), //! ), //! // This parameter provides the storage for the chain genesis. -//! ::default(), +//! &::default(), //! Default::default(), //! Default::default(), //! 
Default::default(), diff --git a/client/src/light/backend.rs b/client/src/light/backend.rs index 65fd94e32395c..ad9f43587e4cd 100644 --- a/client/src/light/backend.rs +++ b/client/src/light/backend.rs @@ -21,19 +21,23 @@ use std::collections::HashMap; use std::sync::Arc; use parking_lot::RwLock; -use sp_core::storage::{ChildInfo, OwnedChildInfo}; +use codec::{Decode, Encode}; + +use sp_core::ChangesTrieConfiguration; +use sp_core::storage::{well_known_keys, ChildInfo, OwnedChildInfo}; use sp_core::offchain::storage::InMemOffchainStorage; use sp_state_machine::{ - Backend as StateBackend, TrieBackend, InMemoryBackend, ChangesTrieTransaction + Backend as StateBackend, TrieBackend, InMemoryBackend, ChangesTrieTransaction, + StorageCollection, ChildStorageCollection, }; use sp_runtime::{generic::BlockId, Justification, Storage}; use sp_runtime::traits::{Block as BlockT, NumberFor, Zero, Header, HasherFor}; -use crate::in_mem::{self, check_genesis_storage}; +use crate::in_mem::check_genesis_storage; use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sc_client_api::{ backend::{ AuxStore, Backend as ClientBackend, BlockImportOperation, RemoteBackend, NewBlockState, - StorageCollection, ChildStorageCollection, + PrunableStateChangesTrieStorage, }, blockchain::{ HeaderBackend as BlockchainHeaderBackend, well_known_cache_keys, @@ -62,6 +66,7 @@ pub struct ImportOperation { finalized_blocks: Vec>, set_head: Option>, storage_update: Option>>, + changes_trie_config_update: Option>, _phantom: std::marker::PhantomData, } @@ -115,7 +120,6 @@ impl ClientBackend for Backend> type BlockImportOperation = ImportOperation; type Blockchain = Blockchain; type State = GenesisOrUnavailableState>; - type ChangesTrieStorage = in_mem::ChangesTrieStorage; type OffchainStorage = InMemOffchainStorage; fn begin_operation(&self) -> ClientResult { @@ -127,6 +131,7 @@ impl ClientBackend for Backend> finalized_blocks: Vec::new(), set_head: None, storage_update: None, + 
changes_trie_config_update: None, _phantom: Default::default(), }) } @@ -148,6 +153,9 @@ impl ClientBackend for Backend> if let Some(header) = operation.header { let is_genesis_import = header.number().is_zero(); + if let Some(new_config) = operation.changes_trie_config_update { + operation.cache.insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_config.encode()); + } self.blockchain.storage().import_header( header, operation.cache, @@ -194,7 +202,7 @@ impl ClientBackend for Backend> self.blockchain.storage().usage_info() } - fn changes_trie_storage(&self) -> Option<&Self::ChangesTrieStorage> { + fn changes_trie_storage(&self) -> Option<&dyn PrunableStateChangesTrieStorage> { None } @@ -296,6 +304,13 @@ impl BlockImportOperation for ImportOperation fn reset_storage(&mut self, input: Storage) -> ClientResult { check_genesis_storage(&input)?; + // changes trie configuration + let changes_trie_config = input.top.iter() + .find(|(k, _)| &k[..] == well_known_keys::CHANGES_TRIE_CONFIG) + .map(|(_, v)| Decode::decode(&mut &v[..]) + .expect("changes trie configuration is encoded properly at genesis")); + self.changes_trie_config_update = Some(changes_trie_config); + // this is only called when genesis block is imported => shouldn't be performance bottleneck let mut storage: HashMap, OwnedChildInfo)>, _> = HashMap::new(); storage.insert(None, input.top); diff --git a/client/src/light/call_executor.rs b/client/src/light/call_executor.rs index 45d9bf303c0cb..01a93c78219bc 100644 --- a/client/src/light/call_executor.rs +++ b/client/src/light/call_executor.rs @@ -222,7 +222,7 @@ pub fn check_execution_proof( ) -> ClientResult> where Header: HeaderT, - E: CodeExecutor, + E: CodeExecutor + Clone + 'static, H: Hasher, H::Out: Ord + codec::Codec + 'static, { @@ -248,7 +248,7 @@ fn check_execution_proof_with_make_header ClientResult> where Header: HeaderT, - E: CodeExecutor, + E: CodeExecutor + Clone + 'static, H: Hasher, H::Out: Ord + codec::Codec + 'static, { @@ -259,7 +259,7 
@@ fn check_execution_proof_with_make_header( + execution_proof_check_on_trie_backend::( &trie_backend, &mut changes, executor, @@ -268,7 +268,7 @@ fn check_execution_proof_with_make_header( + execution_proof_check_on_trie_backend::( &trie_backend, &mut changes, executor, diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index d746b3abe5549..d66108b7f0adb 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -25,12 +25,12 @@ use codec::{Decode, Encode}; use sp_core::{convert_hash, traits::CodeExecutor}; use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, Hash, HashFor, NumberFor, - SimpleArithmetic, CheckedConversion, Zero, + SimpleArithmetic, CheckedConversion, }; use sp_state_machine::{ ChangesTrieRootsStorage, ChangesTrieAnchorBlockId, ChangesTrieConfigurationRange, - TrieBackend, read_proof_check, key_changes_proof_check, create_proof_check_backend_storage, - read_child_proof_check, + InMemoryChangesTrieStorage, TrieBackend, read_proof_check, key_changes_proof_check_with_db, + create_proof_check_backend_storage, read_child_proof_check, }; pub use sp_state_machine::StorageProof; use sp_blockchain::{Error as ClientError, Result as ClientResult}; @@ -113,30 +113,34 @@ impl> LightDataChecker { )?; } - // FIXME: remove this in https://github.com/paritytech/substrate/pull/3201 - let changes_trie_config_range = ChangesTrieConfigurationRange { - config: &request.changes_trie_config, - zero: Zero::zero(), - end: None, - }; - // and now check the key changes proof + get the changes - key_changes_proof_check::( - changes_trie_config_range, - &RootsStorage { - roots: (request.tries_roots.0, &request.tries_roots.2), - prev_roots: remote_roots, - }, - remote_proof, - request.first_block.0, - &ChangesTrieAnchorBlockId { - hash: convert_hash(&request.last_block.1), - number: request.last_block.0, - }, - remote_max_block, - request.storage_key.as_ref().map(Vec::as_slice), - &request.key) - .map_err(|err| 
ClientError::ChangesTrieAccessFailed(err)) + let mut result = Vec::new(); + let proof_storage = InMemoryChangesTrieStorage::with_proof(remote_proof); + for config_range in &request.changes_trie_configs { + let result_range = key_changes_proof_check_with_db::( + ChangesTrieConfigurationRange { + config: config_range.config.as_ref().ok_or(ClientError::ChangesTriesNotSupported)?, + zero: config_range.zero.0, + end: config_range.end.map(|(n, _)| n), + }, + &RootsStorage { + roots: (request.tries_roots.0, &request.tries_roots.2), + prev_roots: &remote_roots, + }, + &proof_storage, + request.first_block.0, + &ChangesTrieAnchorBlockId { + hash: convert_hash(&request.last_block.1), + number: request.last_block.0, + }, + remote_max_block, + request.storage_key.as_ref().map(Vec::as_slice), + &request.key) + .map_err(|err| ClientError::ChangesTrieAccessFailed(err))?; + result.extend(result_range); + } + + Ok(result) } /// Check CHT-based proof for changes tries roots. @@ -197,7 +201,7 @@ impl> LightDataChecker { impl FetchChecker for LightDataChecker where Block: BlockT, - E: CodeExecutor, + E: CodeExecutor + Clone + 'static, H: Hasher, H::Out: Ord + codec::Codec + 'static, S: BlockchainStorage, @@ -284,7 +288,7 @@ impl FetchChecker for LightDataChecker /// A view of BTreeMap as a changes trie roots storage. 
struct RootsStorage<'a, Number: SimpleArithmetic, Hash: 'a> { roots: (Number, &'a [Hash]), - prev_roots: BTreeMap, + prev_roots: &'a BTreeMap, } impl<'a, H, Number, Hash> ChangesTrieRootsStorage for RootsStorage<'a, Number, Hash> @@ -340,7 +344,7 @@ pub mod tests { use crate::in_mem::{Blockchain as InMemoryBlockchain}; use crate::light::fetcher::{FetchChecker, LightDataChecker, RemoteHeaderRequest}; use crate::light::blockchain::tests::{DummyStorage, DummyBlockchain}; - use sp_core::{blake2_256, Blake2Hasher, H256}; + use sp_core::{blake2_256, Blake2Hasher, ChangesTrieConfiguration, H256}; use sp_core::storage::{well_known_keys, StorageKey, ChildInfo}; use sp_runtime::generic::BlockId; use sp_state_machine::Backend; @@ -569,8 +573,13 @@ pub mod tests { // check proof on local client let local_roots_range = local_roots.clone()[(begin - 1) as usize..].to_vec(); + let config = ChangesTrieConfiguration::new(4, 2); let request = RemoteChangesRequest::
{ - changes_trie_config: runtime::changes_trie_config(), + changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { + zero: (0, Default::default()), + end: None, + config: Some(config), + }], first_block: (begin, begin_hash), last_block: (end, end_hash), max_block: (max, max_hash), @@ -624,8 +633,13 @@ pub mod tests { ); // check proof on local client + let config = ChangesTrieConfiguration::new(4, 2); let request = RemoteChangesRequest::
{ - changes_trie_config: runtime::changes_trie_config(), + changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { + zero: (0, Default::default()), + end: None, + config: Some(config), + }], first_block: (1, b1), last_block: (4, b4), max_block: (4, b4), @@ -665,8 +679,13 @@ pub mod tests { begin_hash, end_hash, begin_hash, max_hash, None, &key).unwrap(); let local_roots_range = local_roots.clone()[(begin - 1) as usize..].to_vec(); + let config = ChangesTrieConfiguration::new(4, 2); let request = RemoteChangesRequest::
{ - changes_trie_config: runtime::changes_trie_config(), + changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { + zero: (0, Default::default()), + end: None, + config: Some(config), + }], first_block: (begin, begin_hash), last_block: (end, end_hash), max_block: (max, max_hash), diff --git a/client/src/light/mod.rs b/client/src/light/mod.rs index 5052b36d7f3e1..d65fdef71193d 100644 --- a/client/src/light/mod.rs +++ b/client/src/light/mod.rs @@ -56,7 +56,7 @@ pub fn new_light_backend(blockchain: Arc>) -> Arc( backend: Arc>>, - genesis_storage: GS, + genesis_storage: &GS, code_executor: E, ) -> ClientResult< Client< @@ -73,7 +73,7 @@ pub fn new_light( B: BlockT, S: BlockchainStorage + 'static, GS: BuildStorage, - E: CodeExecutor + RuntimeInfo, + E: CodeExecutor + RuntimeInfo + Clone + 'static, { let local_executor = LocalCallExecutor::new(backend.clone(), code_executor); let executor = GenesisCallExecutor::new(backend.clone(), local_executor); diff --git a/client/state-db/Cargo.toml b/client/state-db/Cargo.toml index 31335d752ea67..9e162c5cb9898 100644 --- a/client/state-db/Cargo.toml +++ b/client/state-db/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-state-db" -version = "2.0.0" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" diff --git a/client/tracing/src/lib.rs b/client/tracing/src/lib.rs index 451f22fe14f6c..b6c1b1120f4ed 100644 --- a/client/tracing/src/lib.rs +++ b/client/tracing/src/lib.rs @@ -290,6 +290,6 @@ fn send_telemetry(span_datum: SpanDatum) { ); } -fn send_prometheus(span_datum: SpanDatum) { +fn send_prometheus(_span_datum: SpanDatum) { unimplemented!() } diff --git a/client/transaction-pool/graph/src/validated_pool.rs b/client/transaction-pool/graph/src/validated_pool.rs index 7af32c88184e6..29f82fb894ac0 100644 --- a/client/transaction-pool/graph/src/validated_pool.rs +++ b/client/transaction-pool/graph/src/validated_pool.rs @@ -470,12 +470,15 @@ impl ValidatedPool { return vec![] } + debug!(target: "txpool", 
"Removing invalid transactions: {:?}", hashes); + // temporarily ban invalid transactions - debug!(target: "txpool", "Banning invalid transactions: {:?}", hashes); self.rotator.ban(&time::Instant::now(), hashes.iter().cloned()); let invalid = self.pool.write().remove_subtree(hashes); + debug!(target: "txpool", "Removed invalid transactions: {:?}", invalid); + let mut listener = self.listener.write(); for tx in &invalid { listener.invalid(&tx.hash, true); diff --git a/docs/CODEOWNERS b/docs/CODEOWNERS index bc4559805b3e0..9304f278534bb 100644 --- a/docs/CODEOWNERS +++ b/docs/CODEOWNERS @@ -46,11 +46,16 @@ /client/finality-grandpa/ @andresilva @DemiMarie-parity /client/consensus/babe/ @andresilva @DemiMarie-parity /client/consensus/slots/ @andresilva @DemiMarie-parity +/client/consensus/pow/ @sorpaas +/primitives/consensus/pow/ @sorpaas # Contracts /frame/contracts/ @pepyakin @thiolliere /frame/contracts/src/wasm/runtime.rs @Robbepop +# EVM +/frame/evm/ @sorpaas + # Inflation points /frame/staking/src/inflation.rs @thiolliere diff --git a/frame/balances/Cargo.toml b/frame/balances/Cargo.toml index e5a3c4f2d7602..f9522b5379ff5 100644 --- a/frame/balances/Cargo.toml +++ b/frame/balances/Cargo.toml @@ -6,7 +6,6 @@ edition = "2018" [dependencies] serde = { version = "1.0.101", optional = true } -safe-mix = { version = "1.0.0", default-features = false } codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } @@ -22,7 +21,6 @@ pallet-transaction-payment = { version = "2.0.0", path = "../transaction-payment default = ["std"] std = [ "serde", - "safe-mix/std", "codec/std", "sp-std/std", "frame-support/std", diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index 1f8e099880b7d..b488b96701cf1 100644 --- 
a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -885,7 +885,10 @@ where Self::free_balance(who) + Self::reserved_balance(who) } + // Check if `value` amount of free balance can be slashed from `who`. + // Is a no-op if value to be slashed is zero. fn can_slash(who: &T::AccountId, value: Self::Balance) -> bool { + if value.is_zero() { return true } Self::free_balance(who) >= value } @@ -901,7 +904,10 @@ where >::get(who) } + // Burn funds from the total issuance, returning a positive imbalance for the amount burned. + // Is a no-op if amount to be burned is zero. fn burn(mut amount: Self::Balance) -> Self::PositiveImbalance { + if amount.is_zero() { return PositiveImbalance::zero() } >::mutate(|issued| { *issued = issued.checked_sub(&amount).unwrap_or_else(|| { amount = *issued; @@ -911,7 +917,11 @@ where PositiveImbalance::new(amount) } + // Create new funds into the total issuance, returning a negative imbalance + // for the amount issued. + // Is a no-op if amount to be issued it zero. fn issue(mut amount: Self::Balance) -> Self::NegativeImbalance { + if amount.is_zero() { return NegativeImbalance::zero() } >::mutate(|issued| *issued = issued.checked_add(&amount).unwrap_or_else(|| { amount = Self::Balance::max_value() - *issued; @@ -921,16 +931,21 @@ where NegativeImbalance::new(amount) } + // Ensure that an account can withdraw from their free balance given any existing withdrawal + // restrictions like locks and vesting balance. + // Is a no-op if amount to be withdrawn is zero. + // // # // Despite iterating over a list of locks, they are limited by the number of // lock IDs, which means the number of runtime modules that intend to use and create locks. 
// # fn ensure_can_withdraw( who: &T::AccountId, - _amount: T::Balance, + amount: T::Balance, reasons: WithdrawReasons, new_balance: T::Balance, ) -> DispatchResult { + if amount.is_zero() { return Ok(()) } if reasons.intersects(WithdrawReason::Reserve | WithdrawReason::Transfer) && Self::vesting_balance(who) > new_balance { @@ -955,12 +970,15 @@ where } } + // Transfer some free balance from `transactor` to `dest`, respecting existence requirements. + // Is a no-op if value to be transferred is zero. fn transfer( transactor: &T::AccountId, dest: &T::AccountId, value: Self::Balance, existence_requirement: ExistenceRequirement, ) -> DispatchResult { + if value.is_zero() { return Ok(()) } let from_balance = Self::free_balance(transactor); let to_balance = Self::free_balance(dest); let would_create = to_balance.is_zero(); @@ -1001,12 +1019,16 @@ where Ok(()) } + // Withdraw some free balance from an account, respecting existence requirements. + // Is a no-op if value to be withdrawn is zero. fn withdraw( who: &T::AccountId, value: Self::Balance, reasons: WithdrawReasons, liveness: ExistenceRequirement, ) -> result::Result { + if value.is_zero() { return Ok(NegativeImbalance::zero()); } + let old_balance = Self::free_balance(who); if let Some(new_balance) = old_balance.checked_sub(&value) { // if we need to keep the account alive... @@ -1026,10 +1048,15 @@ where } } + // Slash an account, returning the negative imbalance created and any left over + // amount that could not be slashed. + // Is a no-op if value to be slashed is zero. fn slash( who: &T::AccountId, value: Self::Balance ) -> (Self::NegativeImbalance, Self::Balance) { + if value.is_zero() { return (NegativeImbalance::zero(), Zero::zero()) } + let free_balance = Self::free_balance(who); let free_slash = cmp::min(free_balance, value); @@ -1049,10 +1076,14 @@ where } } + // Deposit some `value` into the free balance of an existing account. + // Is a no-op if the value to be deposited is zero. 
fn deposit_into_existing( who: &T::AccountId, value: Self::Balance ) -> result::Result { + if value.is_zero() { return Ok(PositiveImbalance::zero()) } + if Self::total_balance(who).is_zero() { Err(Error::::DeadAccount)? } @@ -1060,10 +1091,14 @@ where Ok(PositiveImbalance::new(value)) } + // Deposit some `value` into the free balance of `who`, possibly creating a new account. + // Is a no-op if the value to be deposited is zero. fn deposit_creating( who: &T::AccountId, value: Self::Balance, ) -> Self::PositiveImbalance { + if value.is_zero() { return Self::PositiveImbalance::zero() } + let (imbalance, _) = Self::make_free_balance_be(who, Self::free_balance(who) + value); if let SignedImbalance::Positive(p) = imbalance { p @@ -1122,7 +1157,10 @@ impl, I: Instance> ReservableCurrency for Module where T::Balance: MaybeSerializeDeserialize + Debug { + // Check if `who` can reserve `value` from their free balance. + // Is a no-op if value to be reserved is zero. fn can_reserve(who: &T::AccountId, value: Self::Balance) -> bool { + if value.is_zero() { return true } Self::free_balance(who) .checked_sub(&value) .map_or(false, |new_balance| @@ -1134,7 +1172,10 @@ where >::get(who) } + // Move `value` from the free balance from `who` to their reserved balance. + // Is a no-op if value to be reserved is zero. fn reserve(who: &T::AccountId, value: Self::Balance) -> result::Result<(), DispatchError> { + if value.is_zero() { return Ok(()) } let b = Self::free_balance(who); if b < value { Err(Error::::InsufficientBalance)? @@ -1146,7 +1187,10 @@ where Ok(()) } + // Unreserve some funds, returning any amount that was unable to be unreserved. + // Is a no-op if the value to be unreserved is zero. 
fn unreserve(who: &T::AccountId, value: Self::Balance) -> Self::Balance { + if value.is_zero() { return Zero::zero() } let b = Self::reserved_balance(who); let actual = cmp::min(b, value); Self::set_free_balance(who, Self::free_balance(who) + actual); @@ -1154,10 +1198,14 @@ where value - actual } + // Slash from reserved balance, returning the negative imbalance created, + // and any amount that was unable to be slashed. + // Is a no-op if the value to be slashed is zero. fn slash_reserved( who: &T::AccountId, value: Self::Balance ) -> (Self::NegativeImbalance, Self::Balance) { + if value.is_zero() { return (NegativeImbalance::zero(), Zero::zero()) } let b = Self::reserved_balance(who); let slash = cmp::min(b, value); // underflow should never happen, but it if does, there's nothing to be done here. @@ -1165,11 +1213,14 @@ where (NegativeImbalance::new(slash), value - slash) } + // Move the reserved balance of one account into the free balance of another. + // Is a no-op if the value to be moved is zero. fn repatriate_reserved( slashed: &T::AccountId, beneficiary: &T::AccountId, value: Self::Balance, ) -> result::Result { + if value.is_zero() { return Ok (Zero::zero()) } if Self::total_balance(beneficiary).is_zero() { Err(Error::::DeadAccount)? } @@ -1187,6 +1238,8 @@ where { type Moment = T::BlockNumber; + // Set a lock on the balance of `who`. + // Is a no-op if lock amount is zero. fn set_lock( id: LockIdentifier, who: &T::AccountId, @@ -1194,6 +1247,7 @@ where until: T::BlockNumber, reasons: WithdrawReasons, ) { + if amount.is_zero() { return } let now = >::block_number(); let mut new_lock = Some(BalanceLock { id, amount, until, reasons }); let mut locks = Self::locks(who).into_iter().filter_map(|l| @@ -1275,12 +1329,14 @@ where /// /// If there already exists a vesting schedule for the given account, an `Err` is returned /// and nothing is updated. + /// Is a no-op if the amount to be vested is zero. 
fn add_vesting_schedule( who: &T::AccountId, locked: T::Balance, per_block: T::Balance, starting_block: T::BlockNumber ) -> DispatchResult { + if locked.is_zero() { return Ok(()) } if >::exists(who) { Err(Error::::ExistingVestingSchedule)? } diff --git a/frame/collective/Cargo.toml b/frame/collective/Cargo.toml index 6bf3c1b9b5c78..a2150b82dd5e6 100644 --- a/frame/collective/Cargo.toml +++ b/frame/collective/Cargo.toml @@ -6,7 +6,6 @@ edition = "2018" [dependencies] serde = { version = "1.0.101", optional = true } -safe-mix = { version = "1.0.0", default-features = false } codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } @@ -22,7 +21,6 @@ pallet-balances = { version = "2.0.0", path = "../balances" } [features] default = ["std"] std = [ - "safe-mix/std", "codec/std", "sp-core/std", "sp-std/std", diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index c6102d23eabbe..d583809f0bfa7 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -14,7 +14,7 @@ sp-core = { version = "2.0.0", default-features = false, path = "../../primitive sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-sandbox = { version = "2.0.0", default-features = false, path = "../../primitives/sandbox" } +sp-sandbox = { version = "0.8.0", default-features = false, path = "../../primitives/sandbox" } frame-support = { version = "2.0.0", default-features = false, path = "../support" } frame-system = { version = "2.0.0", default-features = false, path = "../system" } diff --git a/frame/contracts/rpc/Cargo.toml 
b/frame/contracts/rpc/Cargo.toml index c563d9f727dec..61eb55368eb10 100644 --- a/frame/contracts/rpc/Cargo.toml +++ b/frame/contracts/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-contracts-rpc" -version = "2.0.0" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" @@ -15,7 +15,7 @@ sp-rpc = { version = "2.0.0", path = "../../../primitives/rpc" } serde = { version = "1.0.101", features = ["derive"] } sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } sp-api = { version = "2.0.0", path = "../../../primitives/api" } -pallet-contracts-rpc-runtime-api = { version = "2.0.0", path = "./runtime-api" } +pallet-contracts-rpc-runtime-api = { version = "0.8.0", path = "./runtime-api" } [dev-dependencies] serde_json = "1.0.41" diff --git a/frame/contracts/rpc/runtime-api/Cargo.toml b/frame/contracts/rpc/runtime-api/Cargo.toml index c8e183237cf43..e0cbd73f807f2 100644 --- a/frame/contracts/rpc/runtime-api/Cargo.toml +++ b/frame/contracts/rpc/runtime-api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-contracts-rpc-runtime-api" -version = "2.0.0" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" diff --git a/frame/democracy/Cargo.toml b/frame/democracy/Cargo.toml index 8e1aa13fc67f9..02145e8589bab 100644 --- a/frame/democracy/Cargo.toml +++ b/frame/democracy/Cargo.toml @@ -6,7 +6,6 @@ edition = "2018" [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -safe-mix = { version = "1.0.0", default-features = false } codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } @@ -22,7 +21,6 @@ pallet-balances = { version = "2.0.0", path = "../balances" } default = ["std"] std = [ "serde", - "safe-mix/std", "codec/std", "sp-std/std", "sp-io/std", diff 
--git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index 354e93cc292ee..d7790be8f7065 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -497,7 +497,7 @@ decl_module! { } /// Vote in a referendum on behalf of a stash. If `vote.is_aye()`, the vote is to enact - /// the proposal; otherwise it is a vote to keep the status quo. + /// the proposal; otherwise it is a vote to keep the status quo. /// /// # /// - O(1). diff --git a/frame/elections/Cargo.toml b/frame/elections/Cargo.toml index 44d624b986240..495151018e7d8 100644 --- a/frame/elections/Cargo.toml +++ b/frame/elections/Cargo.toml @@ -6,7 +6,6 @@ edition = "2018" [dependencies] serde = { version = "1.0.101", optional = true } -safe-mix = { version = "1.0.0", default-features = false } codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } @@ -22,7 +21,6 @@ pallet-balances = { version = "2.0.0", path = "../balances" } [features] default = ["std"] std = [ - "safe-mix/std", "codec/std", "sp-core/std", "sp-std/std", diff --git a/frame/generic-asset/src/lib.rs b/frame/generic-asset/src/lib.rs index 0a0209b20f175..3b509a683d705 100644 --- a/frame/generic-asset/src/lib.rs +++ b/frame/generic-asset/src/lib.rs @@ -95,9 +95,10 @@ //! This will emit the `Transferred` event. //! - `reserve`: Moves an amount from free balance to reserved balance. //! - `unreserve`: Move up to an amount from reserved balance to free balance. This function cannot fail. +//! - `mint_free`: Mint to an account's free balance. +//! - `burn_free`: Burn an account's free balance. //! - `slash`: Deduct up to an amount from the combined balance of `who`, preferring to deduct from the //! free balance. This function cannot fail. -//! 
- `reward`: Add up to an amount to the free balance of an account. //! - `slash_reserved`: Deduct up to an amount from reserved balance of an account. This function cannot fail. //! - `repatriate_reserved`: Move up to an amount from reserved balance of an account to free balance of another //! account. @@ -109,7 +110,7 @@ //! //! The following examples show how to use the Generic Asset module in your custom module. //! -//! ### Examples from the PRML +//! ### Examples from the frame module //! //! The Fees module uses the `Currency` trait to handle fee charge/refund, and its types inherit from `Currency`: //! @@ -162,7 +163,7 @@ use sp_runtime::traits::{ use sp_std::prelude::*; use sp_std::{cmp, result, fmt::Debug}; use frame_support::{ - decl_event, decl_module, decl_storage, ensure, dispatch, decl_error, + decl_event, decl_module, decl_storage, ensure, decl_error, traits::{ Currency, ExistenceRequirement, Imbalance, LockIdentifier, LockableCurrency, ReservableCurrency, SignedImbalance, UpdateBalanceOutcome, WithdrawReason, WithdrawReasons, TryDrop, @@ -359,7 +360,7 @@ decl_module! { fn deposit_event() = default; /// Create a new kind of asset. - fn create(origin, options: AssetOptions) -> dispatch::DispatchResult { + fn create(origin, options: AssetOptions) -> DispatchResult { let origin = ensure_signed(origin)?; let id = Self::next_asset_id(); @@ -392,7 +393,7 @@ decl_module! { origin, #[compact] asset_id: T::AssetId, new_permission: PermissionLatest - ) -> dispatch::DispatchResult { + ) -> DispatchResult { let origin = ensure_signed(origin)?; let permissions: PermissionVersions = new_permission.into(); @@ -410,56 +411,20 @@ decl_module! { /// Mints an asset, increases its total issuance. /// The origin must have `mint` permissions. 
- fn mint(origin, #[compact] asset_id: T::AssetId, to: T::AccountId, amount: T::Balance) - -> dispatch::DispatchResult - { - let origin = ensure_signed(origin)?; - if Self::check_permission(&asset_id, &origin, &PermissionType::Mint) { - let original_free_balance = Self::free_balance(&asset_id, &to); - let current_total_issuance = >::get(asset_id); - let new_total_issuance = current_total_issuance.checked_add(&amount) - .ok_or(Error::::TotalMintingOverflow)?; - let value = original_free_balance.checked_add(&amount) - .ok_or(Error::::FreeMintingOverflow)?; - - >::insert(asset_id, new_total_issuance); - Self::set_free_balance(&asset_id, &to, value); - - Self::deposit_event(RawEvent::Minted(asset_id, to, amount)); - - Ok(()) - } else { - Err(Error::::NoMintPermission)? - } + fn mint(origin, #[compact] asset_id: T::AssetId, to: T::AccountId, amount: T::Balance) -> DispatchResult { + let who = ensure_signed(origin)?; + Self::mint_free(&asset_id, &who, &to, &amount)?; + Self::deposit_event(RawEvent::Minted(asset_id, to, amount)); + Ok(()) } /// Burns an asset, decreases its total issuance. - /// /// The `origin` must have `burn` permissions. - fn burn(origin, #[compact] asset_id: T::AssetId, to: T::AccountId, amount: T::Balance) - -> dispatch::DispatchResult - { - let origin = ensure_signed(origin)?; - - if Self::check_permission(&asset_id, &origin, &PermissionType::Burn) { - let original_free_balance = Self::free_balance(&asset_id, &to); - - let current_total_issuance = >::get(asset_id); - let new_total_issuance = current_total_issuance.checked_sub(&amount) - .ok_or(Error::::TotalBurningUnderflow)?; - let value = original_free_balance.checked_sub(&amount) - .ok_or(Error::::FreeBurningUnderflow)?; - - >::insert(asset_id, new_total_issuance); - - Self::set_free_balance(&asset_id, &to, value); - - Self::deposit_event(RawEvent::Burned(asset_id, to, amount)); - - Ok(()) - } else { - Err(Error::::NoBurnPermission)? 
- } + fn burn(origin, #[compact] asset_id: T::AssetId, to: T::AccountId, amount: T::Balance) -> DispatchResult { + let who = ensure_signed(origin)?; + Self::burn_free(&asset_id, &who, &to, &amount)?; + Self::deposit_event(RawEvent::Burned(asset_id, to, amount)); + Ok(()) } /// Can be used to create reserved tokens. @@ -468,7 +433,7 @@ decl_module! { origin, asset_id: T::AssetId, options: AssetOptions - ) -> dispatch::DispatchResult { + ) -> DispatchResult { ensure_root(origin)?; Self::create_asset(Some(asset_id), None, options) } @@ -565,6 +530,53 @@ impl Module { >::get(asset_id, who) } + /// Mint to an account's free balance, without event + pub fn mint_free( + asset_id: &T::AssetId, + who: &T::AccountId, + to: &T::AccountId, + amount: &T::Balance, + ) -> DispatchResult { + if Self::check_permission(asset_id, who, &PermissionType::Mint) { + let original_free_balance = Self::free_balance(&asset_id, &to); + let current_total_issuance = >::get(asset_id); + let new_total_issuance = current_total_issuance.checked_add(&amount) + .ok_or(Error::::TotalMintingOverflow)?; + let value = original_free_balance.checked_add(&amount) + .ok_or(Error::::FreeMintingOverflow)?; + + >::insert(asset_id, new_total_issuance); + Self::set_free_balance(&asset_id, &to, value); + Ok(()) + } else { + Err(Error::::NoMintPermission)? 
+ } + } + + /// Burn an account's free balance, without event + pub fn burn_free( + asset_id: &T::AssetId, + who: &T::AccountId, + to: &T::AccountId, + amount: &T::Balance, + ) -> DispatchResult { + if Self::check_permission(asset_id, who, &PermissionType::Burn) { + let original_free_balance = Self::free_balance(asset_id, to); + + let current_total_issuance = >::get(asset_id); + let new_total_issuance = current_total_issuance.checked_sub(amount) + .ok_or(Error::::TotalBurningUnderflow)?; + let value = original_free_balance.checked_sub(amount) + .ok_or(Error::::FreeBurningUnderflow)?; + + >::insert(asset_id, new_total_issuance); + Self::set_free_balance(asset_id, to, value); + Ok(()) + } else { + Err(Error::::NoBurnPermission)? + } + } + /// Creates an asset. /// /// # Arguments @@ -577,7 +589,7 @@ impl Module { asset_id: Option, from_account: Option, options: AssetOptions, - ) -> dispatch::DispatchResult { + ) -> DispatchResult { let asset_id = if let Some(asset_id) = asset_id { ensure!(!>::exists(&asset_id), Error::::IdAlreadyTaken); ensure!(asset_id < Self::next_asset_id(), Error::::IdUnavailable); @@ -610,7 +622,7 @@ impl Module { from: &T::AccountId, to: &T::AccountId, amount: T::Balance - ) -> dispatch::DispatchResult { + ) -> DispatchResult { let new_balance = Self::free_balance(asset_id, from) .checked_sub(&amount) .ok_or(Error::::InsufficientBalance)?; @@ -631,7 +643,7 @@ impl Module { from: &T::AccountId, to: &T::AccountId, amount: T::Balance, - ) -> dispatch::DispatchResult { + ) -> DispatchResult { Self::make_transfer(asset_id, from, to, amount)?; if from != to { @@ -646,7 +658,7 @@ impl Module { /// If the free balance is lower than `amount`, then no funds will be moved and an `Err` will /// be returned. This is different behavior than `unreserve`. pub fn reserve(asset_id: &T::AssetId, who: &T::AccountId, amount: T::Balance) - -> dispatch::DispatchResult + -> DispatchResult { // Do we need to consider that this is an atomic transaction? 
let original_reserve_balance = Self::reserved_balance(asset_id, who); @@ -683,7 +695,7 @@ impl Module { /// then `Some(remaining)` will be returned. Full completion is given by `None`. /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that /// the caller will do this. - fn slash(asset_id: &T::AssetId, who: &T::AccountId, amount: T::Balance) -> Option { + pub fn slash(asset_id: &T::AssetId, who: &T::AccountId, amount: T::Balance) -> Option { let free_balance = Self::free_balance(asset_id, who); let free_slash = sp_std::cmp::min(free_balance, amount); let new_free_balance = free_balance - free_slash; @@ -701,7 +713,7 @@ impl Module { /// is less than `amount`, then a non-zero second item will be returned. /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that /// the caller will do this. - fn slash_reserved(asset_id: &T::AssetId, who: &T::AccountId, amount: T::Balance) -> Option { + pub fn slash_reserved(asset_id: &T::AssetId, who: &T::AccountId, amount: T::Balance) -> Option { let original_reserve_balance = Self::reserved_balance(asset_id, who); let slash = sp_std::cmp::min(original_reserve_balance, amount); let new_reserve_balance = original_reserve_balance - slash; @@ -720,7 +732,7 @@ impl Module { /// the `remaining` would be returned, else `Zero::zero()`. /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that /// the caller will do this. 
- fn repatriate_reserved( + pub fn repatriate_reserved( asset_id: &T::AssetId, who: &T::AccountId, beneficiary: &T::AccountId, @@ -785,7 +797,7 @@ impl Module { _amount: T::Balance, reasons: WithdrawReasons, new_balance: T::Balance, - ) -> dispatch::DispatchResult { + ) -> DispatchResult { if asset_id != &Self::staking_asset_id() { return Ok(()); } diff --git a/frame/indices/Cargo.toml b/frame/indices/Cargo.toml index d05a7654d1240..bd83761ff1a25 100644 --- a/frame/indices/Cargo.toml +++ b/frame/indices/Cargo.toml @@ -6,7 +6,6 @@ edition = "2018" [dependencies] serde = { version = "1.0.101", optional = true } -safe-mix = { version = "1.0.0", default-features = false } codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } sp-keyring = { version = "2.0.0", optional = true, path = "../../primitives/keyring" } sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } @@ -20,7 +19,6 @@ frame-system = { version = "2.0.0", default-features = false, path = "../system" default = ["std"] std = [ "serde", - "safe-mix/std", "sp-keyring", "codec/std", "sp-core/std", diff --git a/frame/metadata/Cargo.toml b/frame/metadata/Cargo.toml index 64cf326ff68dc..bcb90d2368928 100644 --- a/frame/metadata/Cargo.toml +++ b/frame/metadata/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-metadata" -version = "2.0.0" +version = "10.0.0" authors = ["Parity Technologies "] edition = "2018" diff --git a/frame/session/Cargo.toml b/frame/session/Cargo.toml index 82047dbd68336..aa9aaa1f4479a 100644 --- a/frame/session/Cargo.toml +++ b/frame/session/Cargo.toml @@ -6,7 +6,6 @@ edition = "2018" [dependencies] serde = { version = "1.0.101", optional = true } -safe-mix = { version = "1.0.0", default-features = false } codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } 
sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } @@ -28,7 +27,6 @@ default = ["std", "historical"] historical = ["sp-trie"] std = [ "serde", - "safe-mix/std", "codec/std", "sp-std/std", "frame-support/std", diff --git a/frame/society/src/lib.rs b/frame/society/src/lib.rs index 27051e4caf8d3..f4e5904ea42d7 100644 --- a/frame/society/src/lib.rs +++ b/frame/society/src/lib.rs @@ -15,17 +15,17 @@ // along with Substrate. If not, see . //! # Society Module -//! +//! //! - [`society::Trait`](./trait.Trait.html) //! - [`Call`](./enum.Call.html) -//! +//! //! ## Overview -//! +//! //! The Society module is an economic game which incentivizes users to participate -//! and maintain a membership society. -//! +//! and maintain a membership society. +//! //! ### User Types -//! +//! //! At any point, a user in the society can be one of a: //! * Bidder - A user who has submitted intention of joining the society. //! * Candidate - A user who will be voted on to join the society. @@ -33,31 +33,31 @@ //! * Member - A user who is a member of the society. //! * Suspended Member - A member of the society who has accumulated too many strikes //! or failed their membership challenge. -//! +//! //! Of the non-suspended members, there is always a: //! * Head - A member who is exempt from suspension. //! * Defender - A member whose membership is under question and voted on again. -//! +//! //! Of the non-suspended members of the society, a random set of them are chosen as //! "skeptics". The mechanics of skeptics is explained in the //! [member phase](#member-phase) below. -//! +//! //! ### Mechanics -//! +//! //! #### Rewards -//! +//! //! Members are incentivized to participate in the society through rewards paid //! by the Society treasury. These payments have a maturity period that the user //! must wait before they are able to access the funds. -//! +//! //! #### Punishments -//! +//! //! 
Members can be punished by slashing the reward payouts that have not been //! collected. Additionally, members can accumulate "strikes", and when they //! reach a max strike limit, they become suspended. -//! +//! //! #### Skeptics -//! +//! //! During the voting period, a random set of members are selected as "skeptics". //! These skeptics are expected to vote on the current candidates. If they do not vote, //! their skeptic status is treated as a rejection vote, the member is deemed @@ -72,7 +72,7 @@ //! assuming no one else votes, the defender always get a free vote on their //! own challenge keeping them in the society. The Head member is exempt from the //! negative outcome of a membership challenge. -//! +//! //! #### Society Treasury //! //! The membership society is independently funded by a treasury managed by this @@ -80,17 +80,17 @@ //! to determine the number of accepted bids. //! //! #### Rate of Growth -//! +//! //! The membership society can grow at a rate of 10 accepted candidates per rotation period up //! to the max membership threshold. Once this threshold is met, candidate selections //! are stalled until there is space for new members to join. This can be resolved by //! voting out existing members through the random challenges or by using governance //! to increase the maximum membership count. -//! +//! //! ### User Life Cycle -//! +//! //! A user can go through the following phases: -//! +//! //! ```ignore //! +-------> User <----------+ //! | + | @@ -115,40 +115,40 @@ //! | | //! +------------------Society---------------------+ //! ``` -//! +//! //! #### Initialization -//! +//! //! The society is initialized with a single member who is automatically chosen as the Head. -//! +//! //! #### Bid Phase -//! +//! //! New users must have a bid to join the society. -//! +//! //! A user can make a bid by reserving a deposit. Alternatively, an already existing member //! can create a bid on a user's behalf by "vouching" for them. -//! +//! //! 
A bid includes reward information that the user would like to receive for joining //! the society. A vouching bid can additionally request some portion of that reward as a tip //! to the voucher for vouching for the prospective candidate. -//! +//! //! Every rotation period, Bids are ordered by reward amount, and the module //! selects as many bids the Society Pot can support for that period. -//! +//! //! These selected bids become candidates and move on to the Candidate phase. //! Bids that were not selected stay in the bidder pool until they are selected or //! a user chooses to "unbid". -//! +//! //! #### Candidate Phase -//! +//! //! Once a bidder becomes a candidate, members vote whether to approve or reject //! that candidate into society. This voting process also happens during a rotation period. -//! +//! //! The approval and rejection criteria for candidates are not set on chain, //! and may change for different societies. -//! +//! //! At the end of the rotation period, we collect the votes for a candidate //! and randomly select a vote as the final outcome. -//! +//! //! ```ignore //! [ a-accept, r-reject, s-skeptic ] //! +----------------------------------+ @@ -163,63 +163,63 @@ //! //! Result: Rejected //! ``` -//! +//! //! Each member that voted opposite to this randomly selected vote is punished by //! slashing their unclaimed payouts and increasing the number of strikes they have. -//! +//! //! These slashed funds are given to a random user who voted the same as the //! selected vote as a reward for participating in the vote. -//! +//! //! If the candidate wins the vote, they receive their bid reward as a future payout. //! If the bid was placed by a voucher, they will receive their portion of the reward, //! before the rest is paid to the winning candidate. //! //! One winning candidate is selected as the Head of the members. This is randomly //! chosen, weighted by the number of approvals the winning candidates accumulated. -//! +//! //! 
If the candidate loses the vote, they are suspended and it is up to the Suspension //! Judgement origin to determine if the candidate should go through the bidding process //! again, should be accepted into the membership society, or rejected and their deposit //! slashed. -//! +//! //! #### Member Phase -//! +//! //! Once a candidate becomes a member, their role is to participate in society. -//! +//! //! Regular participation involves voting on candidates who want to join the membership //! society, and by voting in the right way, a member will accumulate future payouts. //! When a payout matures, members are able to claim those payouts. -//! +//! //! Members can also vouch for users to join the society, and request a "tip" from //! the fees the new member would collect by joining the society. This vouching //! process is useful in situations where a user may not have enough balance to //! satisfy the bid deposit. A member can only vouch one user at a time. -//! +//! //! During rotation periods, a random group of members are selected as "skeptics". //! These skeptics are expected to vote on the current candidates. If they do not vote, //! their skeptic status is treated as a rejection vote, the member is deemed //! "lazy", and are given a strike per missing vote. -//! +//! //! There is a challenge period in parallel to the rotation period. During a challenge period, //! a random member is selected to defend their membership to the society. Other members //! make a traditional majority-wins vote to determine if the member should stay in the society. //! Ties are treated as a failure of the challenge. -//! +//! //! If a member accumulates too many strikes or fails their membership challenge, //! they will become suspended. While a member is suspended, they are unable to //! claim matured payouts. It is up to the Suspension Judgement origin to determine //! if the member should re-enter society or be removed from society with all their //! future payouts slashed. 
-//! +//! //! ## Interface -//! +//! //! ### Dispatchable Functions -//! +//! //! #### For General Users -//! +//! //! * `bid` - A user can make a bid to join the membership society by reserving a deposit. //! * `unbid` - A user can withdraw their bid for entry, the deposit is returned. -//! +//! //! #### For Members //! //! * `vouch` - A member can place a bid on behalf of a user to join the membership society. @@ -228,9 +228,10 @@ //! * `defender_vote` - A member can vote to approve or reject a defender's continued membership //! to the society. //! * `payout` - A member can claim their first matured payment. -//! +//! * `unfound` - Allow the founder to unfound the society when they are the only member. +//! //! #### For Super Users -//! +//! //! * `found` - The founder origin can initiate this society. Useful for bootstrapping the Society //! pallet on an already running chain. //! * `judge_suspended_member` - The suspension judgement origin is able to make @@ -254,7 +255,7 @@ use sp_std::prelude::*; use codec::{Encode, Decode}; use sp_runtime::{Percent, ModuleId, RuntimeDebug, traits::{ - StaticLookup, AccountIdConversion, Saturating, Zero, IntegerSquareRoot, + StaticLookup, AccountIdConversion, Saturating, Zero, IntegerSquareRoot, Hash, TrailingZeroInput, CheckedSub, EnsureOrigin } }; @@ -305,7 +306,7 @@ pub trait Trait: system::Trait { type MaxLockDuration: Get; /// The origin that is allowed to call `found`. - type FounderOrigin: EnsureOrigin; + type FounderSetOrigin: EnsureOrigin; /// The origin that is allowed to make suspension judgements. type SuspensionJudgementOrigin: EnsureOrigin; @@ -400,6 +401,14 @@ impl BidKind { // This module's storage items. decl_storage! { trait Store for Module, I: Instance=DefaultInstance> as Society { + /// The first member. + pub Founder get(founder) build(|config: &GenesisConfig| config.members.first().cloned()): + Option; + + /// A hash of the rules of this society concerning membership. 
Can only be set once and + /// only by the founder. + pub Rules get(rules): Option; + /// The current set of candidates; bidders that are attempting to become members. pub Candidates get(candidates): Vec>>; @@ -422,7 +431,7 @@ decl_storage! { }): Vec; /// The set of suspended members. - pub SuspendedMembers get(fn suspended_member): map T::AccountId => Option<()>; + pub SuspendedMembers get(fn suspended_member): map T::AccountId => bool; /// The current bids, stored ordered by the value of the bid. Bids: Vec>>; @@ -444,7 +453,7 @@ decl_storage! { /// The defending member currently being challenged. Defender get(fn defender): Option; - + /// Votes for the defender. DefenderVotes: map hasher(twox_64_concat) T::AccountId => Option; @@ -796,28 +805,62 @@ decl_module! { /// This is done as a discrete action in order to allow for the /// module to be included into a running chain and can only be done once. /// - /// The dispatch origin for this call must be from the _FounderOrigin_. + /// The dispatch origin for this call must be from the _FounderSetOrigin_. /// /// Parameters: /// - `founder` - The first member and head of the newly founded society. + /// - `max_members` - The initial max number of members for the society. + /// - `rules` - The rules of this society concerning membership. /// /// # - /// - One storage read to check `Head`. O(1) + /// - Two storage mutates to set `Head` and `Founder`. O(1) /// - One storage write to add the first member to society. O(1) - /// - One storage write to add new Head. O(1) /// - One event. 
/// /// Total Complexity: O(1) /// # #[weight = SimpleDispatchInfo::FixedNormal(10_000)] - fn found(origin, founder: T::AccountId) { - T::FounderOrigin::ensure_origin(origin)?; + fn found(origin, founder: T::AccountId, max_members: u32, rules: Vec) { + T::FounderSetOrigin::ensure_origin(origin)?; ensure!(!>::exists(), Error::::AlreadyFounded); + ensure!(max_members > 1, Error::::MaxMembers); // This should never fail in the context of this function... + >::put(max_members); Self::add_member(&founder)?; >::put(&founder); + >::put(&founder); + Rules::::put(T::Hashing::hash(&rules)); Self::deposit_event(RawEvent::Founded(founder)); } + + /// Annul the founding of the society. + /// + /// The dispatch origin for this call must be Signed, and the signing account must be both + /// the `Founder` and the `Head`. This implies that it may only be done when there is one + /// member. + /// + /// # + /// - Two storage reads O(1). + /// - Four storage removals O(1). + /// - One event. + /// + /// Total Complexity: O(1) + /// # + #[weight = SimpleDispatchInfo::FixedNormal(20_000)] + fn unfound(origin) { + let founder = ensure_signed(origin)?; + ensure!(Founder::::get() == Some(founder.clone()), Error::::NotFounder); + ensure!(Head::::get() == Some(founder.clone()), Error::::NotHead); + + Members::::kill(); + Head::::kill(); + Founder::::kill(); + Rules::::kill(); + Candidates::::kill(); + SuspendedCandidates::::remove_all(); + Self::deposit_event(RawEvent::Unfounded(founder)); + } + /// Allow suspension judgement origin to make judgement on a suspended member. /// /// If a suspended member is forgiven, we simply add them back as a member, not affecting @@ -849,7 +892,7 @@ decl_module! { fn judge_suspended_member(origin, who: T::AccountId, forgive: bool) { T::SuspensionJudgementOrigin::ensure_origin(origin)?; ensure!(>::exists(&who), Error::::NotSuspended); - + if forgive { // Try to add member back to society. Can fail with `MaxMembers` limit.
Self::add_member(&who)?; @@ -1010,34 +1053,40 @@ decl_error! { pub enum Error for Module, I: Instance> { /// An incorrect position was provided. BadPosition, - /// User is not a member + /// User is not a member. NotMember, - /// User is already a member + /// User is already a member. AlreadyMember, - /// User is suspended + /// User is suspended. Suspended, - /// User is not suspended + /// User is not suspended. NotSuspended, - /// Nothing to payout + /// Nothing to payout. NoPayout, - /// Society already founded + /// Society already founded. AlreadyFounded, - /// Not enough in pot to accept candidate + /// Not enough in pot to accept candidate. InsufficientPot, - /// Member is already vouching or banned from vouching again + /// Member is already vouching or banned from vouching again. AlreadyVouching, - /// Member is not vouching + /// Member is not vouching. NotVouching, - /// Cannot remove head + /// Cannot remove the head of the chain. Head, - /// User has already made a bid + /// Cannot remove the founder. + Founder, + /// User has already made a bid. AlreadyBid, - /// User is already a candidate + /// User is already a candidate. AlreadyCandidate, - /// User is not a candidate + /// User is not a candidate. NotCandidate, - /// Too many members in the society + /// Too many members in the society. MaxMembers, + /// The caller is not the founder. + NotFounder, + /// The caller is not the head. + NotHead, } } @@ -1078,6 +1127,20 @@ decl_event! { DefenderVote(AccountId, bool), /// A new max member count has been set NewMaxMembers(u32), + /// Society is unfounded. + Unfounded(AccountId), + } +} + +/// Simple ensure origin struct to filter for the founder account. 
+pub struct EnsureFounder(sp_std::marker::PhantomData); +impl EnsureOrigin for EnsureFounder { + type Success = T::AccountId; + fn try_origin(o: T::Origin) -> Result { + o.into().and_then(|o| match (o, Founder::::get()) { + (system::RawOrigin::Signed(ref who), Some(ref f)) if who == f => Ok(who.clone()), + (r, _) => Err(T::Origin::from(r)), + }) } } @@ -1201,17 +1264,18 @@ impl, I: Instance> Module { /// removes them from the Members storage item. pub fn remove_member(m: &T::AccountId) -> DispatchResult { ensure!(Self::head() != Some(m.clone()), Error::::Head); + ensure!(Self::founder() != Some(m.clone()), Error::::Founder); - >::mutate(|members| - match members.binary_search(&m) { - Err(_) => Err(Error::::NotMember)?, - Ok(i) => { - members.remove(i); - T::MembershipChanged::change_members_sorted(&[], &[m.clone()], members); - Ok(()) - } + let mut members = >::get(); + match members.binary_search(&m) { + Err(_) => Err(Error::::NotMember)?, + Ok(i) => { + members.remove(i); + T::MembershipChanged::change_members_sorted(&[], &[m.clone()], &members[..]); + >::put(members); + Ok(()) } - ) + } } /// End the current period and begin a new one. @@ -1251,7 +1315,7 @@ impl, I: Instance> Module { .filter_map(|m| >::take(&candidate, m).map(|v| (v, m))) .inspect(|&(v, _)| if v == Vote::Approve { approval_count += 1 }) .collect::>(); - + // Select one of the votes at random. // Note that `Vote::Skeptical` and `Vote::Reject` both reject the candidate. let is_accepted = pick_item(&mut rng, &votes).map(|x| x.0) == Some(Vote::Approve); @@ -1302,6 +1366,9 @@ impl, I: Instance> Module { } }).collect::>(); + // Clean up all votes. + >::remove_all(); + // Reward one of the voters who voted the right way. if !total_slash.is_zero() { if let Some(winner) = pick_item(&mut rng, &rewardees) { @@ -1325,7 +1392,7 @@ impl, I: Instance> Module { // if at least one candidate was accepted... 
if !accepted.is_empty() { - // select one as primary, randomly chosen from the accepted, weighted by approvals. + // select one as primary, randomly chosen from the accepted, weighted by approvals. // Choose a random number between 0 and `total_approvals` let primary_point = pick_usize(&mut rng, total_approvals - 1); // Find the zero bid or the user who falls on that point @@ -1333,7 +1400,7 @@ impl, I: Instance> Module { .expect("e.1 of final item == total_approvals; \ worst case find will always return that item; qed") .0.clone(); - + let accounts = accepted.into_iter().map(|x| x.0).collect::>(); // Then write everything back out, signal the changed membership and leave an event. @@ -1404,7 +1471,7 @@ impl, I: Instance> Module { /// Suspend a user, removing them from the member list. fn suspend_member(who: &T::AccountId) { if Self::remove_member(&who).is_ok() { - >::insert(who, ()); + >::insert(who, true); >::remove(who); Self::deposit_event(RawEvent::MemberSuspended(who.clone())); } @@ -1450,7 +1517,7 @@ impl, I: Instance> Module { let mut rejection_count = 0; // Tallies total number of approve and reject votes for the defender. members.iter() - .filter_map(|m| >::get(m)) + .filter_map(|m| >::take(m)) .for_each(|v| { match v { Vote::Approve => approval_count += 1, @@ -1463,6 +1530,9 @@ impl, I: Instance> Module { Self::suspend_member(&defender); *members = Self::members(); } + + // Clean up all votes. + >::remove_all(); } // Start a new defender rotation @@ -1509,8 +1579,10 @@ impl, I: Instance> Module { /// the number of bids would not surpass `MaxMembers` if all were accepted. /// /// May be empty. - pub fn take_selected(members_len: usize, pot: BalanceOf) -> Vec>> - { + pub fn take_selected( + members_len: usize, + pot: BalanceOf + ) -> Vec>> { let max_members = MaxMembers::::get() as usize; // No more than 10 will be returned. 
let mut max_selections: usize = 10.min(max_members.saturating_sub(members_len)); @@ -1521,7 +1593,7 @@ impl, I: Instance> Module { // The list of selected candidates let mut selected = Vec::new(); - + if bids.len() > 0 { // Can only select at most the length of bids max_selections = max_selections.min(bids.len()); diff --git a/frame/society/src/mock.rs b/frame/society/src/mock.rs index fc46561fd4fef..b8f249f35ce39 100644 --- a/frame/society/src/mock.rs +++ b/frame/society/src/mock.rs @@ -44,7 +44,6 @@ parameter_types! { pub const PeriodSpend: u64 = 1000; pub const MaxLockDuration: u64 = 100; pub const ChallengePeriod: u64 = 8; - pub const MaxMembers: u32 = 100; pub const BlockHashCount: u64 = 250; pub const MaximumBlockWeight: u32 = 1024; @@ -104,7 +103,7 @@ impl Trait for Test { type MembershipChanged = (); type RotationPeriod = RotationPeriod; type MaxLockDuration = MaxLockDuration; - type FounderOrigin = EnsureSignedBy; + type FounderSetOrigin = EnsureSignedBy; type SuspensionJudgementOrigin = EnsureSignedBy; type ChallengePeriod = ChallengePeriod; } @@ -133,6 +132,9 @@ impl EnvBuilder { (40, 50), (50, 50), (60, 50), + (70, 50), + (80, 50), + (90, 50), ], pot: 0, max_members: 100, diff --git a/frame/society/src/tests.rs b/frame/society/src/tests.rs index 7d3ee06a2abeb..3e5afc47f500a 100644 --- a/frame/society/src/tests.rs +++ b/frame/society/src/tests.rs @@ -21,21 +21,61 @@ use mock::*; use frame_support::{assert_ok, assert_noop}; use sp_runtime::traits::BadOrigin; +use sp_core::blake2_256; #[test] fn founding_works() { - EnvBuilder::new().with_members(vec![]).execute(|| { + EnvBuilder::new().with_max_members(0).with_members(vec![]).execute(|| { + // Not set up initially. 
+ assert_eq!(Society::founder(), None); + assert_eq!(Society::max_members(), 0); + assert_eq!(Society::pot(), 0); // Account 1 is set as the founder origin // Account 5 cannot start a society - assert_noop!(Society::found(Origin::signed(5), 20), BadOrigin); + assert_noop!(Society::found(Origin::signed(5), 20, 100, vec![]), BadOrigin); // Account 1 can start a society, where 10 is the founding member - assert_ok!(Society::found(Origin::signed(1), 10)); + assert_ok!(Society::found(Origin::signed(1), 10, 100, b"be cool".to_vec())); // Society members only include 10 assert_eq!(Society::members(), vec![10]); // 10 is the head of the society assert_eq!(Society::head(), Some(10)); + // ...and also the founder + assert_eq!(Society::founder(), Some(10)); + // 100 members max + assert_eq!(Society::max_members(), 100); + // rules are correct + assert_eq!(Society::rules(), Some(blake2_256(b"be cool").into())); + // Pot grows after first rotation period + run_to_block(4); + assert_eq!(Society::pot(), 1000); // Cannot start another society - assert_noop!(Society::found(Origin::signed(1), 20), Error::::AlreadyFounded); + assert_noop!( + Society::found(Origin::signed(1), 20, 100, vec![]), + Error::::AlreadyFounded + ); + }); +} + +#[test] +fn unfounding_works() { + EnvBuilder::new().with_max_members(0).with_members(vec![]).execute(|| { + // Account 1 sets the founder... + assert_ok!(Society::found(Origin::signed(1), 10, 100, vec![])); + // Account 2 cannot unfound it as it's not the founder. + assert_noop!(Society::unfound(Origin::signed(2)), Error::::NotFounder); + // Account 10 can, though. + assert_ok!(Society::unfound(Origin::signed(10))); + + // 1 sets the founder to 20 this time + assert_ok!(Society::found(Origin::signed(1), 20, 100, vec![])); + // Bring in a new member... 
+ assert_ok!(Society::bid(Origin::signed(10), 0)); + run_to_block(4); + assert_ok!(Society::vote(Origin::signed(20), 10, true)); + run_to_block(8); + + // Unfounding won't work now, even though it's from 20. + assert_noop!(Society::unfound(Origin::signed(20)), Error::::NotHead); }); } @@ -248,7 +288,7 @@ fn suspended_member_lifecycle_works() { assert_ok!(Society::add_member(&20)); assert_eq!(>::get(), vec![10, 20]); assert_eq!(Strikes::::get(20), 0); - assert_eq!(>::get(20), None); + assert_eq!(>::get(20), false); // Let's suspend account 20 by giving them 2 strikes by not voting assert_ok!(Society::bid(Origin::signed(30), 0)); @@ -258,29 +298,29 @@ fn suspended_member_lifecycle_works() { run_to_block(16); // Strike 2 is accumulated, and 20 is suspended :( - assert_eq!(>::get(20), Some(())); + assert_eq!(>::get(20), true); assert_eq!(>::get(), vec![10]); // Suspended members cannot get payout Society::bump_payout(&20, 10, 100); assert_noop!(Society::payout(Origin::signed(20)), Error::::NotMember); - + // Normal people cannot make judgement assert_noop!(Society::judge_suspended_member(Origin::signed(20), 20, true), BadOrigin); // Suspension judgment origin can judge thee // Suspension judgement origin forgives the suspended member assert_ok!(Society::judge_suspended_member(Origin::signed(2), 20, true)); - assert_eq!(>::get(20), None); + assert_eq!(>::get(20), false); assert_eq!(>::get(), vec![10, 20]); // Let's suspend them again, directly Society::suspend_member(&20); - assert_eq!(>::get(20), Some(())); + assert_eq!(>::get(20), true); // Suspension judgement origin does not forgive the suspended member assert_ok!(Society::judge_suspended_member(Origin::signed(2), 20, false)); // Cleaned up - assert_eq!(>::get(20), None); + assert_eq!(>::get(20), false); assert_eq!(>::get(), vec![10]); assert_eq!(>::get(20), vec![]); }); @@ -460,10 +500,11 @@ fn unbid_vouch_works() { } #[test] -fn head_cannot_be_removed() { +fn founder_and_head_cannot_be_removed() { 
EnvBuilder::new().execute(|| { - // 10 is the only member and head + // 10 is the only member, founder, and head assert_eq!(Society::members(), vec![10]); + assert_eq!(Society::founder(), Some(10)); assert_eq!(Society::head(), Some(10)); // 10 can still accumulate strikes assert_ok!(Society::bid(Origin::signed(20), 0)); @@ -485,16 +526,37 @@ fn head_cannot_be_removed() { run_to_block(32); assert_eq!(Society::members(), vec![10, 50]); assert_eq!(Society::head(), Some(50)); + // Founder is unchanged + assert_eq!(Society::founder(), Some(10)); - // 10 can now be suspended for strikes + // 50 can still accumulate strikes assert_ok!(Society::bid(Origin::signed(60), 0)); - run_to_block(36); - // The candidate is rejected, so voting approve will give a strike - assert_ok!(Society::vote(Origin::signed(10), 60, true)); run_to_block(40); - assert_eq!(Strikes::::get(10), 0); - assert_eq!(>::get(10), Some(())); - assert_eq!(Society::members(), vec![50]); + assert_eq!(Strikes::::get(50), 1); + assert_ok!(Society::bid(Origin::signed(70), 0)); + run_to_block(48); + assert_eq!(Strikes::::get(50), 2); + + // Replace the head + assert_ok!(Society::bid(Origin::signed(80), 0)); + run_to_block(52); + assert_ok!(Society::vote(Origin::signed(10), 80, true)); + assert_ok!(Society::vote(Origin::signed(50), 80, true)); + assert_ok!(Society::defender_vote(Origin::signed(10), true)); // Keep defender around + run_to_block(56); + assert_eq!(Society::members(), vec![10, 50, 80]); + assert_eq!(Society::head(), Some(80)); + assert_eq!(Society::founder(), Some(10)); + + // 50 can now be suspended for strikes + assert_ok!(Society::bid(Origin::signed(90), 0)); + run_to_block(60); + // The candidate is rejected, so voting approve will give a strike + assert_ok!(Society::vote(Origin::signed(50), 90, true)); + run_to_block(64); + assert_eq!(Strikes::::get(50), 0); + assert_eq!(>::get(50), true); + assert_eq!(Society::members(), vec![10, 80]); }); } @@ -505,6 +567,11 @@ fn challenges_work() { 
assert_ok!(Society::add_member(&20)); assert_ok!(Society::add_member(&30)); assert_ok!(Society::add_member(&40)); + // Votes are empty + assert_eq!(>::get(10), None); + assert_eq!(>::get(20), None); + assert_eq!(>::get(30), None); + assert_eq!(>::get(40), None); // Check starting point assert_eq!(Society::members(), vec![10, 20, 30, 40]); assert_eq!(Society::defender(), None); @@ -528,6 +595,11 @@ fn challenges_work() { run_to_block(24); // 20 survives assert_eq!(Society::members(), vec![10, 20, 30, 40]); + // Votes are reset + assert_eq!(>::get(10), None); + assert_eq!(>::get(20), None); + assert_eq!(>::get(30), None); + assert_eq!(>::get(40), None); // One more time assert_eq!(Society::defender(), Some(20)); // 2 people say accept, 2 reject @@ -538,9 +610,14 @@ fn challenges_work() { run_to_block(32); // 20 is suspended assert_eq!(Society::members(), vec![10, 30, 40]); - assert_eq!(Society::suspended_member(20), Some(())); + assert_eq!(Society::suspended_member(20), true); // New defender is chosen assert_eq!(Society::defender(), Some(40)); + // Votes are reset + assert_eq!(>::get(10), None); + assert_eq!(>::get(20), None); + assert_eq!(>::get(30), None); + assert_eq!(>::get(40), None); }); } @@ -610,7 +687,7 @@ fn vouching_handles_removed_member_with_bid() { assert_eq!(>::get(20), Some(VouchingStatus::Vouching)); // Suspend that member Society::suspend_member(&20); - assert_eq!(>::get(20), Some(())); + assert_eq!(>::get(20), true); // Nothing changes yet assert_eq!(>::get(), vec![create_bid(1000, 30, BidKind::Vouch(20, 100))]); assert_eq!(>::get(20), Some(VouchingStatus::Vouching)); @@ -637,7 +714,7 @@ fn vouching_handles_removed_member_with_candidate() { assert_eq!(Society::candidates(), vec![create_bid(1000, 30, BidKind::Vouch(20, 100))]); // Suspend that member Society::suspend_member(&20); - assert_eq!(>::get(20), Some(())); + assert_eq!(>::get(20), true); // Nothing changes yet assert_eq!(Society::candidates(), vec![create_bid(1000, 30, BidKind::Vouch(20, 
100))]); assert_eq!(>::get(20), Some(VouchingStatus::Vouching)); diff --git a/frame/staking/Cargo.toml b/frame/staking/Cargo.toml index e3e6fcbe7badf..52584cf47f47d 100644 --- a/frame/staking/Cargo.toml +++ b/frame/staking/Cargo.toml @@ -6,7 +6,6 @@ edition = "2018" [dependencies] serde = { version = "1.0.101", optional = true } -safe-mix = { version = "1.0.0", default-features = false } codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } sp-keyring = { version = "2.0.0", optional = true, path = "../../primitives/keyring" } sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } @@ -31,7 +30,6 @@ migrate = [] default = ["std"] std = [ "serde", - "safe-mix/std", "sp-keyring", "codec/std", "sp-std/std", diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index a59aa5d0c8e83..326a01599034c 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -283,7 +283,7 @@ use sp_staking::{ use sp_runtime::{Serialize, Deserialize}; use frame_system::{self as system, ensure_signed, ensure_root}; -use sp_phragmen::{ExtendedBalance, PhragmenStakedAssignment}; +use sp_phragmen::ExtendedBalance; const DEFAULT_MINIMUM_VALIDATOR_COUNT: u32 = 4; const MAX_NOMINATIONS: usize = 16; @@ -1508,12 +1508,10 @@ impl Module { .collect::>(); let assignments = phragmen_result.assignments; - let to_votes = |b: BalanceOf| - , u64>>::convert(b) as ExtendedBalance; let to_balance = |e: ExtendedBalance| >>::convert(e); - let mut supports = sp_phragmen::build_support_map::<_, _, _, T::CurrencyToVote>( + let supports = sp_phragmen::build_support_map::<_, _, _, T::CurrencyToVote>( &elected_stashes, &assignments, Self::slashable_balance_of, @@ -1628,7 +1626,6 @@ impl Module { /// For each element in the iterator the given number of points in u32 is added to the /// validator, thus duplicates are handled. 
pub fn reward_by_indices(validators_points: impl IntoIterator) { - // TODO: This can be optimised once #3302 is implemented. let current_elected_len = >::current_elected().len() as u32; CurrentEraPointsEarned::mutate(|rewards| { diff --git a/frame/staking/src/slashing.rs b/frame/staking/src/slashing.rs index 6d591603fdbcc..7322b9a1d3118 100644 --- a/frame/staking/src/slashing.rs +++ b/frame/staking/src/slashing.rs @@ -649,9 +649,6 @@ fn pay_reporters( T::Slash::on_unbalanced(value_slashed); } -// TODO: function for undoing a slash. -// - #[cfg(test)] mod tests { use super::*; diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 555edac979d87..c290c6a8581d4 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -90,29 +90,16 @@ fn basic_setup_works() { ); assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); - if cfg!(feature = "equalize") { - assert_eq!( - Staking::stakers(11), - Exposure { total: 1250, own: 1000, others: vec![ IndividualExposure { who: 101, value: 250 }] } - ); - assert_eq!( - Staking::stakers(21), - Exposure { total: 1250, own: 1000, others: vec![ IndividualExposure { who: 101, value: 250 }] } - ); - // initial slot_stake - assert_eq!(Staking::slot_stake(), 1250); - } else { - assert_eq!( - Staking::stakers(11), - Exposure { total: 1125, own: 1000, others: vec![ IndividualExposure { who: 101, value: 125 }] } - ); - assert_eq!( - Staking::stakers(21), - Exposure { total: 1375, own: 1000, others: vec![ IndividualExposure { who: 101, value: 375 }] } - ); - // initial slot_stake - assert_eq!(Staking::slot_stake(), 1125); - } + assert_eq!( + Staking::stakers(11), + Exposure { total: 1125, own: 1000, others: vec![ IndividualExposure { who: 101, value: 125 }] } + ); + assert_eq!( + Staking::stakers(21), + Exposure { total: 1375, own: 1000, others: vec![ IndividualExposure { who: 101, value: 375 }] } + ); + // initial slot_stake + assert_eq!(Staking::slot_stake(), 1125); // The number of 
validators required. @@ -488,57 +475,30 @@ fn nominating_and_rewards_should_work() { // ------ check the staked value of all parties. - if cfg!(feature = "equalize") { - // total expo of 10, with 1200 coming from nominators (externals), according to phragmen. - assert_eq!(Staking::stakers(11).own, 1000); - assert_eq_error_rate!(Staking::stakers(11).total, 1000 + 1000, 2); - // 2 and 4 supported 10, each with stake 600, according to phragmen. - assert_eq!( - Staking::stakers(11).others.iter().map(|e| e.value).collect::>>(), - vec![600, 400] - ); - assert_eq!( - Staking::stakers(11).others.iter().map(|e| e.who).collect::>(), - vec![3, 1] - ); - // total expo of 20, with 500 coming from nominators (externals), according to phragmen. - assert_eq!(Staking::stakers(21).own, 1000); - assert_eq_error_rate!(Staking::stakers(21).total, 1000 + 1000, 2); - // 2 and 4 supported 20, each with stake 250, according to phragmen. - assert_eq!( - Staking::stakers(21).others.iter().map(|e| e.value).collect::>>(), - vec![400, 600] - ); - assert_eq!( - Staking::stakers(21).others.iter().map(|e| e.who).collect::>(), - vec![3, 1] - ); - } else { - // total expo of 10, with 1200 coming from nominators (externals), according to phragmen. - assert_eq!(Staking::stakers(11).own, 1000); - assert_eq!(Staking::stakers(11).total, 1000 + 800); - // 2 and 4 supported 10, each with stake 600, according to phragmen. - assert_eq!( - Staking::stakers(11).others.iter().map(|e| e.value).collect::>>(), - vec![400, 400] - ); - assert_eq!( - Staking::stakers(11).others.iter().map(|e| e.who).collect::>(), - vec![3, 1] - ); - // total expo of 20, with 500 coming from nominators (externals), according to phragmen. - assert_eq!(Staking::stakers(21).own, 1000); - assert_eq_error_rate!(Staking::stakers(21).total, 1000 + 1200, 2); - // 2 and 4 supported 20, each with stake 250, according to phragmen. 
- assert_eq!( - Staking::stakers(21).others.iter().map(|e| e.value).collect::>>(), - vec![600, 600] - ); - assert_eq!( - Staking::stakers(21).others.iter().map(|e| e.who).collect::>(), - vec![3, 1] - ); - } + // total expo of 10, with 1200 coming from nominators (externals), according to phragmen. + assert_eq!(Staking::stakers(11).own, 1000); + assert_eq!(Staking::stakers(11).total, 1000 + 800); + // 2 and 4 supported 10, each with stake 600, according to phragmen. + assert_eq!( + Staking::stakers(11).others.iter().map(|e| e.value).collect::>>(), + vec![400, 400] + ); + assert_eq!( + Staking::stakers(11).others.iter().map(|e| e.who).collect::>(), + vec![3, 1] + ); + // total expo of 20, with 500 coming from nominators (externals), according to phragmen. + assert_eq!(Staking::stakers(21).own, 1000); + assert_eq_error_rate!(Staking::stakers(21).total, 1000 + 1200, 2); + // 2 and 4 supported 20, each with stake 250, according to phragmen. + assert_eq!( + Staking::stakers(21).others.iter().map(|e| e.value).collect::>>(), + vec![600, 600] + ); + assert_eq!( + Staking::stakers(21).others.iter().map(|e| e.who).collect::>(), + vec![3, 1] + ); // They are not chosen anymore assert_eq!(Staking::stakers(31).total, 0); @@ -559,59 +519,31 @@ fn nominating_and_rewards_should_work() { let payout_for_10 = total_payout_1 / 3; let payout_for_20 = 2 * total_payout_1 / 3; - if cfg!(feature = "equalize") { - // Nominator 2: has [400 / 2000 ~ 1 / 5 from 10] + [600 / 2000 ~ 3 / 10 from 20]'s reward. - assert_eq_error_rate!( - Balances::total_balance(&2), - initial_balance + payout_for_10 / 5 + payout_for_20 * 3 / 10, - 2, - ); - // Nominator 4: has [400 / 2000 ~ 1 / 5 from 20] + [600 / 2000 ~ 3 / 10 from 10]'s reward. - assert_eq_error_rate!( - Balances::total_balance(&4), - initial_balance + payout_for_20 / 5 + payout_for_10 * 3 / 10, - 2, - ); - - // Validator 10: got 1000 / 2000 external stake. 
- assert_eq_error_rate!( - Balances::total_balance(&10), - initial_balance + payout_for_10 / 2, - 1, - ); - // Validator 20: got 1000 / 2000 external stake. - assert_eq_error_rate!( - Balances::total_balance(&20), - initial_balance + payout_for_20 / 2, - 1, - ); - } else { - // Nominator 2: has [400/1800 ~ 2/9 from 10] + [600/2200 ~ 3/11 from 20]'s reward. ==> 2/9 + 3/11 - assert_eq_error_rate!( - Balances::total_balance(&2), - initial_balance + (2 * payout_for_10 / 9 + 3 * payout_for_20 / 11), - 1, - ); - // Nominator 4: has [400/1800 ~ 2/9 from 10] + [600/2200 ~ 3/11 from 20]'s reward. ==> 2/9 + 3/11 - assert_eq_error_rate!( - Balances::total_balance(&4), - initial_balance + (2 * payout_for_10 / 9 + 3 * payout_for_20 / 11), - 1, - ); - - // Validator 10: got 800 / 1800 external stake => 8/18 =? 4/9 => Validator's share = 5/9 - assert_eq_error_rate!( - Balances::total_balance(&10), - initial_balance + 5 * payout_for_10 / 9, - 1, - ); - // Validator 20: got 1200 / 2200 external stake => 12/22 =? 6/11 => Validator's share = 5/11 - assert_eq_error_rate!( - Balances::total_balance(&20), - initial_balance + 5 * payout_for_20 / 11, - 1, - ); - } + // Nominator 2: has [400/1800 ~ 2/9 from 10] + [600/2200 ~ 3/11 from 20]'s reward. ==> 2/9 + 3/11 + assert_eq_error_rate!( + Balances::total_balance(&2), + initial_balance + (2 * payout_for_10 / 9 + 3 * payout_for_20 / 11), + 1, + ); + // Nominator 4: has [400/1800 ~ 2/9 from 10] + [600/2200 ~ 3/11 from 20]'s reward. ==> 2/9 + 3/11 + assert_eq_error_rate!( + Balances::total_balance(&4), + initial_balance + (2 * payout_for_10 / 9 + 3 * payout_for_20 / 11), + 1, + ); + + // Validator 10: got 800 / 1800 external stake => 8/18 =? 4/9 => Validator's share = 5/9 + assert_eq_error_rate!( + Balances::total_balance(&10), + initial_balance + 5 * payout_for_10 / 9, + 1, + ); + // Validator 20: got 1200 / 2200 external stake => 12/22 =? 
6/11 => Validator's share = 5/11 + assert_eq_error_rate!( + Balances::total_balance(&20), + initial_balance + 5 * payout_for_20 / 11, + 1, + ); check_exposure_all(); check_nominator_all(); @@ -1778,51 +1710,6 @@ fn bond_with_little_staked_value_bounded_by_slot_stake() { }); } -#[cfg(feature = "equalize")] -#[test] -fn phragmen_linear_worse_case_equalize() { - ExtBuilder::default() - .nominate(false) - .validator_pool(true) - .fair(true) - .build() - .execute_with(|| { - bond_validator(50, 1000); - bond_validator(60, 1000); - bond_validator(70, 1000); - - bond_nominator(2, 2000, vec![11]); - bond_nominator(4, 1000, vec![11, 21]); - bond_nominator(6, 1000, vec![21, 31]); - bond_nominator(8, 1000, vec![31, 41]); - bond_nominator(110, 1000, vec![41, 51]); - bond_nominator(120, 1000, vec![51, 61]); - bond_nominator(130, 1000, vec![61, 71]); - - for i in &[10, 20, 30, 40, 50, 60, 70] { - assert_ok!(Staking::set_payee(Origin::signed(*i), RewardDestination::Controller)); - } - - assert_eq_uvec!(validator_controllers(), vec![40, 30]); - assert_ok!(Staking::set_validator_count(Origin::ROOT, 7)); - - start_era(1); - - assert_eq_uvec!(validator_controllers(), vec![10, 60, 40, 20, 50, 30, 70]); - - assert_eq_error_rate!(Staking::stakers(11).total, 3000, 2); - assert_eq_error_rate!(Staking::stakers(21).total, 2255, 2); - assert_eq_error_rate!(Staking::stakers(31).total, 2255, 2); - assert_eq_error_rate!(Staking::stakers(41).total, 1925, 2); - assert_eq_error_rate!(Staking::stakers(51).total, 1870, 2); - assert_eq_error_rate!(Staking::stakers(61).total, 1890, 2); - assert_eq_error_rate!(Staking::stakers(71).total, 1800, 2); - - check_exposure_all(); - check_nominator_all(); - }) -} - #[test] fn new_era_elects_correct_number_of_validators() { ExtBuilder::default() @@ -2195,9 +2082,6 @@ fn reporters_receive_their_slice() { // amount. ExtBuilder::default().build().execute_with(|| { // The reporters' reward is calculated from the total exposure. 
- #[cfg(feature = "equalize")] - let initial_balance = 1250; - #[cfg(not(feature = "equalize"))] let initial_balance = 1125; assert_eq!(Staking::stakers(&11).total, initial_balance); @@ -2229,9 +2113,6 @@ fn subsequent_reports_in_same_span_pay_out_less() { // amount. ExtBuilder::default().build().execute_with(|| { // The reporters' reward is calculated from the total exposure. - #[cfg(feature = "equalize")] - let initial_balance = 1250; - #[cfg(not(feature = "equalize"))] let initial_balance = 1125; assert_eq!(Staking::stakers(&11).total, initial_balance); diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index 1695375d36832..d8460ef7e8aea 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -8,7 +8,7 @@ edition = "2018" log = "0.4" serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.0.6", default-features = false, features = ["derive"] } -frame-metadata = { version = "2.0.0", default-features = false, path = "../metadata" } +frame-metadata = { version = "10.0.0", default-features = false, path = "../metadata" } sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } sp-io ={ path = "../../primitives/io", default-features = false } sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } @@ -18,7 +18,7 @@ sp-inherents = { version = "2.0.0", default-features = false, path = "../../prim frame-support-procedural = { version = "2.0.0", path = "./procedural" } paste = "0.1.6" once_cell = { version = "0.2.4", default-features = false, optional = true } -sp-state-machine = { version = "2.0.0", optional = true, path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.8", optional = true, path = "../../primitives/state-machine" } bitmask = { version = "0.5.0", default-features = false } impl-trait-for-tuples = "0.1.3" tracing = { version = "0.1.10", optional = true } diff --git 
a/frame/support/src/storage/generator/mod.rs b/frame/support/src/storage/generator/mod.rs index 386e3d1cecaef..476c6d4472054 100644 --- a/frame/support/src/storage/generator/mod.rs +++ b/frame/support/src/storage/generator/mod.rs @@ -20,6 +20,8 @@ //! implementation of StorageValue for this type). //! //! They are used by `decl_storage`. +//! +//! This is internal api and is subject to change. mod linked_map; mod map; diff --git a/frame/support/src/storage/mod.rs b/frame/support/src/storage/mod.rs index 0908adae3b604..f629ac3eb91a1 100644 --- a/frame/support/src/storage/mod.rs +++ b/frame/support/src/storage/mod.rs @@ -23,6 +23,7 @@ use crate::{traits::Len, hash::{Twox128, StorageHasher}}; pub mod unhashed; pub mod hashed; pub mod child; +#[doc(hidden)] pub mod generator; /// A trait for working with macro-generated storage values under the substrate storage API. diff --git a/frame/support/src/weights.rs b/frame/support/src/weights.rs index f1092b500230b..e44ab16458836 100644 --- a/frame/support/src/weights.rs +++ b/frame/support/src/weights.rs @@ -236,6 +236,36 @@ impl SimpleDispatchInfo { } } +/// A struct to represent a weight which is a function of the input arguments. The given items have +/// the following types: +/// +/// - `F`: a closure with the same argument list as the dispatched, wrapped in a tuple. +/// - `DispatchClass`: class of the dispatch. +/// - `bool`: whether this dispatch pays fee or not. +pub struct FunctionOf(pub F, pub DispatchClass, pub bool); + +impl WeighData for FunctionOf +where + F : Fn(Args) -> Weight +{ + fn weigh_data(&self, args: Args) -> Weight { + (self.0)(args) + } +} + +impl ClassifyDispatch for FunctionOf { + fn classify_dispatch(&self, _: Args) -> DispatchClass { + self.1.clone() + } +} + +impl PaysFee for FunctionOf { + fn pays_fee(&self, _: T) -> bool { + self.2 + } +} + + /// Implementation for unchecked extrinsic. 
impl GetDispatchInfo for UncheckedExtrinsic @@ -271,3 +301,46 @@ impl GetDispatchInfo for sp_runtime::testing::TestX } } } + +#[cfg(test)] +#[allow(dead_code)] +mod tests { + use crate::decl_module; + use super::*; + + pub trait Trait { + type Origin; + type Balance; + type BlockNumber; + } + + pub struct TraitImpl {} + + impl Trait for TraitImpl { + type Origin = u32; + type BlockNumber = u32; + type Balance = u32; + } + + decl_module! { + pub struct Module for enum Call where origin: T::Origin { + // no arguments, fixed weight + #[weight = SimpleDispatchInfo::FixedNormal(1000)] + fn f0(_origin) { unimplemented!(); } + + // weight = a x 10 + b + #[weight = FunctionOf(|args: (&u32, &u32)| args.0 * 10 + args.1, DispatchClass::Normal, true)] + fn f11(_origin, _a: u32, _eb: u32) { unimplemented!(); } + + #[weight = FunctionOf(|_: (&u32, &u32)| 0, DispatchClass::Operational, true)] + fn f12(_origin, _a: u32, _eb: u32) { unimplemented!(); } + } + } + + #[test] + fn weights_are_correct() { + assert_eq!(Call::::f11(10, 20).get_dispatch_info().weight, 120); + assert_eq!(Call::::f11(10, 20).get_dispatch_info().class, DispatchClass::Normal); + assert_eq!(Call::::f0().get_dispatch_info().weight, 1000); + } +} diff --git a/frame/support/test/Cargo.toml b/frame/support/test/Cargo.toml index 92dc32e478f1d..0c73dcd65e4b8 100644 --- a/frame/support/test/Cargo.toml +++ b/frame/support/test/Cargo.toml @@ -8,7 +8,7 @@ edition = "2018" serde = { version = "1.0.101", default-features = false, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } sp-io ={ path = "../../../primitives/io", default-features = false } -sp-state-machine = { version = "2.0.0", optional = true, path = "../../../primitives/state-machine" } +sp-state-machine = { version = "0.8", optional = true, path = "../../../primitives/state-machine" } frame-support = { version = "2.0.0", default-features = false, path = "../" } sp-inherents = { 
version = "2.0.0", default-features = false, path = "../../../primitives/inherents" } sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } diff --git a/frame/system/Cargo.toml b/frame/system/Cargo.toml index 227aecee2ee43..098c320f454e5 100644 --- a/frame/system/Cargo.toml +++ b/frame/system/Cargo.toml @@ -6,7 +6,6 @@ edition = "2018" [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -safe-mix = { version = "1.0.0", default-features = false } codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } @@ -18,12 +17,13 @@ impl-trait-for-tuples = "0.1.3" [dev-dependencies] criterion = "0.2.11" +sp-externalities = { version = "0.8.0", path = "../../primitives/externalities" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } [features] default = ["std"] std = [ "serde", - "safe-mix/std", "codec/std", "sp-core/std", "sp-std/std", diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 9e7678a7ede34..478fd780b98fa 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -110,7 +110,7 @@ use sp_runtime::{ }, }; -use sp_core::storage::well_known_keys; +use sp_core::{ChangesTrieConfiguration, storage::well_known_keys}; use frame_support::{ decl_module, decl_event, decl_storage, decl_error, storage, Parameter, traits::{Contains, Get, ModuleToIndex, OnReapAccount}, @@ -121,9 +121,6 @@ use codec::{Encode, Decode}; #[cfg(any(feature = "std", test))] use sp_io::TestExternalities; -#[cfg(any(feature = "std", test))] -use sp_core::ChangesTrieConfiguration; - pub mod offchain; /// Handler for when a new account has been created. @@ -259,11 +256,56 @@ decl_module! 
{ storage::unhashed::put_raw(well_known_keys::HEAP_PAGES, &pages.encode()); } - /// Set the new code. + /// Set the new runtime code. + #[weight = SimpleDispatchInfo::FixedOperational(200_000)] + pub fn set_code(origin, code: Vec) { + ensure_root(origin)?; + + let current_version = T::Version::get(); + let new_version = sp_io::misc::runtime_version(&code) + .and_then(|v| RuntimeVersion::decode(&mut &v[..]).ok()) + .ok_or_else(|| Error::::FailedToExtractRuntimeVersion)?; + + if new_version.spec_name != current_version.spec_name { + Err(Error::::InvalidSpecName)? + } + + if new_version.spec_version < current_version.spec_version { + Err(Error::::SpecVersionNotAllowedToDecrease)? + } else if new_version.spec_version == current_version.spec_version { + if new_version.impl_version < current_version.impl_version { + Err(Error::::ImplVersionNotAllowedToDecrease)? + } else if new_version.impl_version == current_version.impl_version { + Err(Error::::SpecOrImplVersionNeedToIncrease)? + } + } + + storage::unhashed::put_raw(well_known_keys::CODE, &code); + } + + /// Set the new runtime code without doing any checks of the given `code`. #[weight = SimpleDispatchInfo::FixedOperational(200_000)] - pub fn set_code(origin, new: Vec) { + pub fn set_code_without_checks(origin, code: Vec) { + ensure_root(origin)?; + storage::unhashed::put_raw(well_known_keys::CODE, &code); + } + + /// Set the new changes trie configuration. 
+ #[weight = SimpleDispatchInfo::FixedOperational(20_000)] + pub fn set_changes_trie_config(origin, changes_trie_config: Option) { ensure_root(origin)?; - storage::unhashed::put_raw(well_known_keys::CODE, &new); + match changes_trie_config.clone() { + Some(changes_trie_config) => storage::unhashed::put_raw( + well_known_keys::CHANGES_TRIE_CONFIG, + &changes_trie_config.encode(), + ), + None => storage::unhashed::kill(well_known_keys::CHANGES_TRIE_CONFIG), + } + + let log = generic::DigestItem::ChangesTrieSignal( + generic::ChangesTrieSignal::NewConfiguration(changes_trie_config), + ); + Self::deposit_log(log.into()); } /// Set some items of storage. @@ -327,7 +369,24 @@ decl_event!( decl_error! { /// Error for the System module - pub enum Error for Module {} + pub enum Error for Module { + /// The name of specification does not match between the current runtime + /// and the new runtime. + InvalidSpecName, + /// The specification version is not allowed to decrease between the current runtime + /// and the new runtime. + SpecVersionNotAllowedToDecrease, + /// The implementation version is not allowed to decrease between the current runtime + /// and the new runtime. + ImplVersionNotAllowedToDecrease, + /// The specification or the implementation version need to increase between the + /// current runtime and the new runtime. + SpecOrImplVersionNeedToIncrease, + /// Failed to extract the runtime version from the new runtime. + /// + /// Either calling `Core_version` or decoding `RuntimeVersion` failed. + FailedToExtractRuntimeVersion, + } } /// Origin for the System module. 
@@ -1189,6 +1248,14 @@ mod tests { pub const MaximumBlockWeight: Weight = 1024; pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); pub const MaximumBlockLength: u32 = 1024; + pub const Version: RuntimeVersion = RuntimeVersion { + spec_name: sp_version::create_runtime_str!("test"), + impl_name: sp_version::create_runtime_str!("system-test"), + authoring_version: 1, + spec_version: 1, + impl_version: 1, + apis: sp_version::create_apis_vec!([]), + }; } impl Trait for Test { @@ -1206,7 +1273,7 @@ mod tests { type MaximumBlockWeight = MaximumBlockWeight; type AvailableBlockRatio = AvailableBlockRatio; type MaximumBlockLength = MaximumBlockLength; - type Version = (); + type Version = Version; type ModuleToIndex = (); } @@ -1503,7 +1570,7 @@ mod tests { .validate(&1, CALL, op, len) .unwrap() .priority; - assert_eq!(priority, Bounded::max_value()); + assert_eq!(priority, u64::max_value()); }) } @@ -1562,4 +1629,65 @@ mod tests { assert_eq!(ext.validate(&1, CALL, normal, len).unwrap().longevity, 15); }) } + + + #[test] + fn set_code_checks_works() { + struct CallInWasm(Vec); + + impl sp_core::traits::CallInWasm for CallInWasm { + fn call_in_wasm( + &self, + _: &[u8], + _: &str, + _: &[u8], + _: &mut dyn sp_externalities::Externalities, + ) -> Result, String> { + Ok(self.0.clone()) + } + } + + let test_data = vec![ + ("test", 1, 2, Ok(())), + ("test", 1, 1, Err(Error::::SpecOrImplVersionNeedToIncrease)), + ("test2", 1, 1, Err(Error::::InvalidSpecName)), + ("test", 2, 1, Ok(())), + ("test", 0, 1, Err(Error::::SpecVersionNotAllowedToDecrease)), + ("test", 1, 0, Err(Error::::ImplVersionNotAllowedToDecrease)), + ]; + + for (spec_name, spec_version, impl_version, expected) in test_data.into_iter() { + let version = RuntimeVersion { + spec_name: spec_name.into(), + spec_version, + impl_version, + ..Default::default() + }; + let call_in_wasm = CallInWasm(version.encode()); + + let mut ext = new_test_ext(); + 
ext.register_extension(sp_core::traits::CallInWasmExt::new(call_in_wasm)); + ext.execute_with(|| { + let res = System::set_code( + RawOrigin::Root.into(), + vec![1, 2, 3, 4], + ); + + assert_eq!(expected.map_err(DispatchError::from), res); + }); + } + } + + #[test] + fn set_code_with_real_wasm_blob() { + let executor = substrate_test_runtime_client::new_native_executor(); + let mut ext = new_test_ext(); + ext.register_extension(sp_core::traits::CallInWasmExt::new(executor)); + ext.execute_with(|| { + System::set_code( + RawOrigin::Root.into(), + substrate_test_runtime_client::runtime::WASM_BINARY.to_vec(), + ).unwrap(); + }); + } } diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index bae3096f3cdd1..00dbf6bc66b73 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -151,7 +151,7 @@ impl ChargeTransactionPayment { /// transactions can have a tip. /// /// final_fee = base_fee + targeted_fee_adjustment(len_fee + weight_fee) + tip; - fn compute_fee( + pub fn compute_fee( len: u32, info: ::DispatchInfo, tip: BalanceOf, @@ -218,20 +218,23 @@ impl SignedExtension for ChargeTransactionPayment // pay any fees. let tip = self.0; let fee = Self::compute_fee(len as u32, info, tip); - let imbalance = match T::Currency::withdraw( - who, - fee, - if tip.is_zero() { - WithdrawReason::TransactionPayment.into() - } else { - WithdrawReason::TransactionPayment | WithdrawReason::Tip - }, - ExistenceRequirement::KeepAlive, - ) { - Ok(imbalance) => imbalance, - Err(_) => return InvalidTransaction::Payment.into(), - }; - T::OnTransactionPayment::on_unbalanced(imbalance); + // Only mess with balances if fee is not zero. 
+ if !fee.is_zero() { + let imbalance = match T::Currency::withdraw( + who, + fee, + if tip.is_zero() { + WithdrawReason::TransactionPayment.into() + } else { + WithdrawReason::TransactionPayment | WithdrawReason::Tip + }, + ExistenceRequirement::KeepAlive, + ) { + Ok(imbalance) => imbalance, + Err(_) => return InvalidTransaction::Payment.into(), + }; + T::OnTransactionPayment::on_unbalanced(imbalance); + } let mut r = ValidTransaction::default(); // NOTE: we probably want to maximize the _fee (of any type) per weight unit_ here, which diff --git a/primitives/api/Cargo.toml b/primitives/api/Cargo.toml index 1e3e555456000..b9f736570ac4d 100644 --- a/primitives/api/Cargo.toml +++ b/primitives/api/Cargo.toml @@ -11,7 +11,7 @@ sp-core = { version = "2.0.0", default-features = false, path = "../core" } sp-std = { version = "2.0.0", default-features = false, path = "../std" } sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } sp-version = { version = "2.0.0", default-features = false, path = "../version" } -sp-state-machine = { version = "2.0.0", optional = true, path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.8", optional = true, path = "../../primitives/state-machine" } hash-db = { version = "0.15.2", optional = true } [dev-dependencies] diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index dc4031fbc4b8a..770a843bfa6c1 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -323,12 +323,13 @@ fn generate_runtime_api_base_structures() -> Result { }) } - fn into_storage_changes< - T: #crate_::ChangesTrieStorage<#crate_::HasherFor, #crate_::NumberFor> - >( + fn into_storage_changes( &self, backend: &Self::StateBackend, - changes_trie_storage: Option<&T>, + changes_trie_state: Option<&#crate_::ChangesTrieState< + #crate_::HasherFor, + #crate_::NumberFor, + >>, parent_hash: 
Block::Hash, ) -> std::result::Result< #crate_::StorageChanges, @@ -337,7 +338,7 @@ fn generate_runtime_api_base_structures() -> Result { self.initialized_block.borrow_mut().take(); self.changes.replace(Default::default()).into_storage_changes( backend, - changes_trie_storage, + changes_trie_state, parent_hash, self.storage_transaction_cache.replace(Default::default()), ) diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 122e863228966..bde00d48172e8 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -36,7 +36,7 @@ extern crate self as sp_api; #[doc(hidden)] #[cfg(feature = "std")] pub use sp_state_machine::{ - OverlayedChanges, StorageProof, Backend as StateBackend, ChangesTrieStorage, + OverlayedChanges, StorageProof, Backend as StateBackend, ChangesTrieState, }; #[doc(hidden)] #[cfg(feature = "std")] @@ -325,10 +325,10 @@ pub trait ApiExt: ApiErrorExt { /// api functions. /// /// After executing this function, all collected changes are reset. 
- fn into_storage_changes, NumberFor>>( + fn into_storage_changes( &self, backend: &Self::StateBackend, - changes_trie_storage: Option<&T>, + changes_trie_state: Option<&ChangesTrieState, NumberFor>>, parent_hash: Block::Hash, ) -> Result, String> where Self: Sized; } diff --git a/primitives/api/test/Cargo.toml b/primitives/api/test/Cargo.toml index f1f0dfb50f12b..5892063bd1fc4 100644 --- a/primitives/api/test/Cargo.toml +++ b/primitives/api/test/Cargo.toml @@ -12,7 +12,7 @@ sp-runtime = { version = "2.0.0", path = "../../runtime" } sp-blockchain = { version = "2.0.0", path = "../../blockchain" } sp-consensus = { version = "0.8", path = "../../../primitives/consensus/common" } codec = { package = "parity-scale-codec", version = "1.0.0" } -sp-state-machine = { version = "2.0.0", path = "../../../primitives/state-machine" } +sp-state-machine = { version = "0.8", path = "../../../primitives/state-machine" } trybuild = "1.0.17" rustversion = "1.0.0" diff --git a/primitives/api/test/tests/runtime_calls.rs b/primitives/api/test/tests/runtime_calls.rs index f45df7c517254..66412edae026d 100644 --- a/primitives/api/test/tests/runtime_calls.rs +++ b/primitives/api/test/tests/runtime_calls.rs @@ -185,7 +185,7 @@ fn record_proof_works() { // Use the proof backend to execute `execute_block`. 
let mut overlay = Default::default(); let executor = NativeExecutor::::new(WasmExecutionMethod::Interpreted, None); - execution_proof_check_on_trie_backend( + execution_proof_check_on_trie_backend::<_, u64, _>( &backend, &mut overlay, &executor, diff --git a/primitives/blockchain/Cargo.toml b/primitives/blockchain/Cargo.toml index 4f3f0d0b5fad0..a3c442094daaf 100644 --- a/primitives/blockchain/Cargo.toml +++ b/primitives/blockchain/Cargo.toml @@ -13,4 +13,4 @@ codec = { package = "parity-scale-codec", version = "1.0.0", default-features = sp-consensus = { version = "0.8", path = "../consensus/common" } sp-runtime = { version = "2.0.0", path = "../runtime" } sp-block-builder = { version = "2.0.0", path = "../block-builder" } -sp-state-machine = { version = "2.0.0", path = "../state-machine" } +sp-state-machine = { version = "0.8", path = "../state-machine" } diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index 8571d76801079..101dcd1443d6b 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -228,11 +228,13 @@ pub trait Cache: Send + Sync { /// Returns cached value by the given key. /// /// Returned tuple is the range where value has been active and the value itself. + /// Fails if read from cache storage fails or if the value for block is discarded + /// (i.e. if block is earlier that best finalized, but it is not in canonical chain). fn get_at( &self, key: &well_known_cache_keys::Id, block: &BlockId, - ) -> Option<((NumberFor, Block::Hash), Option<(NumberFor, Block::Hash)>, Vec)>; + ) -> Result, Block::Hash), Option<(NumberFor, Block::Hash)>, Vec)>>; } /// Blockchain info diff --git a/primitives/blockchain/src/error.rs b/primitives/blockchain/src/error.rs index dbab93738e79f..d51ab787c72fb 100644 --- a/primitives/blockchain/src/error.rs +++ b/primitives/blockchain/src/error.rs @@ -106,6 +106,9 @@ pub enum Error { /// Changes tries are not supported. 
#[display(fmt = "Changes tries are not supported by the runtime")] ChangesTriesNotSupported, + /// Error reading changes tries configuration. + #[display(fmt = "Error reading changes tries configuration")] + ErrorReadingChangesTriesConfig, /// Key changes query has failed. #[display(fmt = "Failed to check changes proof: {}", _0)] #[from(ignore)] diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index 4c38e31914700..3ae79caa43126 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -11,7 +11,7 @@ libp2p = { version = "0.14.0-alpha.1", default-features = false } log = "0.4.8" sp-core = { path= "../../core" } sp-inherents = { version = "2.0.0", path = "../../inherents" } -sp-state-machine = { version = "2.0.0", path = "../../../primitives/state-machine" } +sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } futures = { version = "0.3.1", features = ["thread-pool"] } futures-timer = "0.4.0" sp-std = { version = "2.0.0", path = "../../std" } diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index 1b98ede376857..4927faede06f7 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -74,13 +74,16 @@ pub enum BlockStatus { /// Environment producer for a Consensus instance. Creates proposer instance and communication streams. pub trait Environment { /// The proposer type this creates. - type Proposer: Proposer + 'static; + type Proposer: Proposer + Send + 'static; + /// A future that resolves to the proposer. + type CreateProposer: Future> + + Send + Unpin + 'static; /// Error which can occur upon creation. type Error: From + std::fmt::Debug + 'static; /// Initialize the proposal logic on top of a specific header. Provide /// the authorities at that header. 
- fn init(&mut self, parent_header: &B::Header) -> Result; + fn init(&mut self, parent_header: &B::Header) -> Self::CreateProposer; } /// A proposal that is created by a [`Proposer`]. diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 8242ff8044466..f7eec82e55daf 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -26,7 +26,7 @@ zeroize = { version = "1.0.0", default-features = false } lazy_static = { version = "1.4.0", default-features = false, optional = true } parking_lot = { version = "0.9.0", optional = true } sp-debug-derive = { version = "2.0.0", path = "../debug-derive" } -sp-externalities = { version = "2.0.0", optional = true, path = "../externalities" } +sp-externalities = { version = "0.8.0", optional = true, path = "../externalities" } sp-storage = { version = "2.0.0", default-features = false, path = "../storage" } libsecp256k1 = { version = "0.3.2", default-features = false } diff --git a/primitives/core/src/changes_trie.rs b/primitives/core/src/changes_trie.rs index 65619a9d45315..d38761ccf0fd1 100644 --- a/primitives/core/src/changes_trie.rs +++ b/primitives/core/src/changes_trie.rs @@ -38,6 +38,17 @@ pub struct ChangesTrieConfiguration { pub digest_levels: u32, } +/// Substrate changes trie configuration range. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct ChangesTrieConfigurationRange { + /// Zero block of configuration. + pub zero: (Number, Hash), + /// Last block of configuration (if configuration has been deactivated at some point). + pub end: Option<(Number, Hash)>, + /// The configuration itself. None if changes tries were disabled in this range. + pub config: Option, +} + impl ChangesTrieConfiguration { /// Create new configuration given digest interval and levels. 
pub fn new(digest_interval: u32, digest_levels: u32) -> Self { diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index 32e65de40dbaa..ee768ee4a6226 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -75,7 +75,7 @@ mod tests; pub use self::hash::{H160, H256, H512, convert_hash}; pub use self::uint::U256; -pub use changes_trie::ChangesTrieConfiguration; +pub use changes_trie::{ChangesTrieConfiguration, ChangesTrieConfigurationRange}; #[cfg(feature = "full_crypto")] pub use crypto::{DeriveJunction, Pair, Public}; diff --git a/primitives/core/src/traits.rs b/primitives/core/src/traits.rs index cd188e3367707..020b5e2dc5461 100644 --- a/primitives/core/src/traits.rs +++ b/primitives/core/src/traits.rs @@ -80,7 +80,7 @@ sp_externalities::decl_extension! { } /// Code execution engine. -pub trait CodeExecutor: Sized + Send + Sync { +pub trait CodeExecutor: Sized + Send + Sync + CallInWasm + Clone + 'static { /// Externalities error type. type Error: Display + Debug + Send + 'static; @@ -99,3 +99,30 @@ pub trait CodeExecutor: Sized + Send + Sync { native_call: Option, ) -> (Result, Self::Error>, bool); } + +/// Something that can call a method in a WASM blob. +pub trait CallInWasm: Send + Sync { + /// Call the given `method` in the given `wasm_blob` using `call_data` (SCALE encoded arguments) + /// to decode the arguments for the method. + /// + /// Returns the SCALE encoded return value of the method. + fn call_in_wasm( + &self, + wasm_blob: &[u8], + method: &str, + call_data: &[u8], + ext: &mut dyn Externalities, + ) -> Result, String>; +} + +sp_externalities::decl_extension! { + /// The call-in-wasm extension to register/retrieve from the externalities. + pub struct CallInWasmExt(Box); +} + +impl CallInWasmExt { + /// Creates a new instance of `Self`. 
+ pub fn new(inner: T) -> Self { + Self(Box::new(inner)) + } +} diff --git a/primitives/externalities/Cargo.toml b/primitives/externalities/Cargo.toml index 09ce3253f2d98..23889da4ea289 100644 --- a/primitives/externalities/Cargo.toml +++ b/primitives/externalities/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-externalities" -version = "2.0.0" +version = "0.8.0" license = "GPL-3.0" authors = ["Parity Technologies "] edition = "2018" @@ -8,4 +8,4 @@ edition = "2018" [dependencies] sp-storage = { version = "2.0.0", path = "../storage" } sp-std = { version = "2.0.0", path = "../std" } -environmental = { version = "1.0.2" } +environmental = { version = "1.1.1" } diff --git a/primitives/io/Cargo.toml b/primitives/io/Cargo.toml index 04350c9479ede..2f03f230ef8e0 100644 --- a/primitives/io/Cargo.toml +++ b/primitives/io/Cargo.toml @@ -10,10 +10,10 @@ hash-db = { version = "0.15.2", default-features = false } sp-core = { version = "2.0.0", default-features = false, path = "../core" } sp-std = { version = "2.0.0", default-features = false, path = "../std" } libsecp256k1 = { version = "0.3.4", optional = true } -sp-state-machine = { version = "2.0.0", optional = true, path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.8", optional = true, path = "../../primitives/state-machine" } sp-runtime-interface = { version = "2.0.0", default-features = false, path = "../runtime-interface" } sp-trie = { version = "2.0.0", optional = true, path = "../../primitives/trie" } -sp-externalities = { version = "2.0.0", optional = true, path = "../externalities" } +sp-externalities = { version = "0.8.0", optional = true, path = "../externalities" } log = { version = "0.4.8", optional = true } [features] diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 7ac2ae1012272..704477b1fb2bc 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -35,7 +35,7 @@ use sp_std::ops::Deref; #[cfg(feature = "std")] use sp_core::{ crypto::Pair, - 
traits::KeystoreExt, + traits::{KeystoreExt, CallInWasmExt}, offchain::{OffchainExt, TransactionPoolExt}, hexdisplay::HexDisplay, storage::{ChildStorageKey, ChildInfo}, @@ -49,7 +49,7 @@ use sp_core::{ }; #[cfg(feature = "std")] -use ::sp_trie::{TrieConfiguration, trie_types::Layout}; +use sp_trie::{TrieConfiguration, trie_types::Layout}; use sp_runtime_interface::{runtime_interface, Pointer}; @@ -351,6 +351,25 @@ pub trait Misc { fn print_hex(data: &[u8]) { log::debug!(target: "runtime", "{}", HexDisplay::from(&data)); } + + /// Extract the runtime version of the given wasm blob by calling `Core_version`. + /// + /// Returns the SCALE encoded runtime version and `None` if the call failed. + /// + /// # Performance + /// + /// Calling this function is very expensive and should only be done very occasionally. + /// For getting the runtime version, it requires instantiating the wasm blob and calling a + /// function in this blob. + fn runtime_version(&mut self, wasm: &[u8]) -> Option> { + // Create some dummy externalities, `Core_version` should not write data anyway. + let mut ext = sp_state_machine::BasicExternalities::default(); + + self.extension::() + .expect("No `CallInWasmExt` associated for the current context!") + .call_in_wasm(wasm, "Core_version", &[], &mut ext) + .ok() + } } /// Interfaces for working with crypto related types from within the runtime. 
diff --git a/primitives/runtime-interface/Cargo.toml b/primitives/runtime-interface/Cargo.toml index cdc94b6a2f226..7ed2257556b47 100644 --- a/primitives/runtime-interface/Cargo.toml +++ b/primitives/runtime-interface/Cargo.toml @@ -8,15 +8,14 @@ edition = "2018" sp-wasm-interface = { version = "2.0.0", optional = true, path = "../wasm-interface" } sp-std = { version = "2.0.0", default-features = false, path = "../std" } sp-runtime-interface-proc-macro = { version = "2.0.0", path = "proc-macro" } -sp-externalities = { version = "2.0.0", optional = true, path = "../externalities" } +sp-externalities = { version = "0.8.0", optional = true, path = "../externalities" } codec = { package = "parity-scale-codec", version = "1.0.6", default-features = false } -environmental = { version = "1.0.2", optional = true } static_assertions = "1.0.0" primitive-types = { version = "0.6.1", default-features = false } [dev-dependencies] sp-runtime-interface-test-wasm = { version = "2.0.0", path = "test-wasm" } -sp-state-machine = { version = "2.0.0", path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.8", path = "../../primitives/state-machine" } sp-core = { version = "2.0.0", path = "../core" } sp-io = { version = "2.0.0", path = "../io" } rustversion = "1.0.0" @@ -29,7 +28,6 @@ std = [ "sp-std/std", "codec/std", "sp-externalities", - "environmental", "primitive-types/std", ] diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs index 55b5eb3a4d616..bdddc5eba70e7 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs @@ -21,10 +21,10 @@ //! functions will prepare the parameters for the FFI boundary, call the external host function //! exported into wasm and convert back the result. 
//! -//! [`generate`](bare_function_interface::generate) is the entry point for generating for each +//! [`generate`] is the entry point for generating for each //! trait method one bare function. //! -//! [`function_for_method`](bare_function_interface::function_for_method) generates the bare +//! [`function_for_method`] generates the bare //! function per trait method. Each bare function contains both implementations. The implementations //! are feature-gated, so that one is compiled for the native and the other for the wasm side. diff --git a/primitives/runtime-interface/src/host.rs b/primitives/runtime-interface/src/host.rs index 2fc272165f33c..cf03e6623af59 100644 --- a/primitives/runtime-interface/src/host.rs +++ b/primitives/runtime-interface/src/host.rs @@ -49,6 +49,9 @@ pub trait IntoPreallocatedFFIValue: RIType { } /// Something that can be created from a ffi value. +/// Implementations are safe to assume that the `arg` given to `from_ffi_value` +/// is only generated by the corresponding [`wasm::IntoFFIValue`](crate::wasm::IntoFFIValue) +/// implementation. pub trait FromFFIValue: RIType { /// As `Self` can be an unsized type, it needs to be represented by a sized type at the host. /// This `SelfInstance` is the sized type. diff --git a/primitives/runtime-interface/src/impls.rs b/primitives/runtime-interface/src/impls.rs index 23be7fd35adfc..3cd114268bbd3 100644 --- a/primitives/runtime-interface/src/impls.rs +++ b/primitives/runtime-interface/src/impls.rs @@ -150,7 +150,7 @@ impl IntoFFIValue for bool { /// /// The `u64` value is build by `length 32bit << 32 | pointer 32bit` /// -/// If `T == u8` the length and the pointer are taken directly from the `Self`. +/// If `T == u8` the length and the pointer are taken directly from `Self`. /// Otherwise `Self` is encoded and the length and the pointer are taken from the encoded vector. 
impl RIType for Vec { type FFIType = u64; @@ -209,7 +209,7 @@ impl FromFFIValue for Vec { /// /// The `u64` value is build by `length 32bit << 32 | pointer 32bit` /// -/// If `T == u8` the length and the pointer are taken directly from the `Self`. +/// If `T == u8` the length and the pointer are taken directly from `Self`. /// Otherwise `Self` is encoded and the length and the pointer are taken from the encoded vector. impl RIType for [T] { type FFIType = u64; @@ -400,7 +400,7 @@ for_primitive_types! { /// /// The `u64` value is build by `length 32bit << 32 | pointer 32bit` /// -/// The length and the pointer are taken directly from the `Self`. +/// The length and the pointer are taken directly from `Self`. impl RIType for str { type FFIType = u64; } diff --git a/primitives/runtime-interface/src/lib.rs b/primitives/runtime-interface/src/lib.rs index 330b1699a609f..30d74ad2db399 100644 --- a/primitives/runtime-interface/src/lib.rs +++ b/primitives/runtime-interface/src/lib.rs @@ -25,10 +25,10 @@ //! # Using a type in a runtime interface //! //! Any type that should be used in a runtime interface as argument or return value needs to -//! implement [`RIType`]. The associated type `FFIType` is the type that is used in the FFI -//! function to represent the actual type. For example `[T]` is represented by an `u64`. The slice -//! pointer and the length will be mapped to an `u64` value. For more information, see the -//! implementation of [`RIType`] for [`T`]. The FFI function definition is used when calling from +//! implement [`RIType`]. The associated type [`FFIType`](RIType::FFIType) is the type that is used +//! in the FFI function to represent the actual type. For example `[T]` is represented by an `u64`. +//! The slice pointer and the length will be mapped to an `u64` value. For more information see +//! this [table](#ffi-type-and-conversion). The FFI function definition is used when calling from //! the wasm runtime into the node. //! //! 
Traits are used to convert from a type to the corresponding [`RIType::FFIType`]. @@ -69,6 +69,34 @@ //! //! For more information on declaring a runtime interface, see //! [`#[runtime_interface]`](attr.runtime_interface.html). +//! +//! # FFI type and conversion +//! +//! The following table documents how values of types are passed between the wasm and +//! the host side and how they are converted into the corresponding type. +//! +//! | Type | FFI type | Conversion | +//! |----|----|----| +//! | `u8` | `u8` | `Identity` | +//! | `u16` | `u16` | `Identity` | +//! | `u32` | `u32` | `Identity` | +//! | `u64` | `u64` | `Identity` | +//! | `i8` | `i8` | `Identity` | +//! | `i16` | `i16` | `Identity` | +//! | `i32` | `i32` | `Identity` | +//! | `i64` | `i64` | `Identity` | +//! | `bool` | `u8` | `if v { 1 } else { 0 }` | +//! | `&str` | `u64` | v.len() 32bit << 32 | v.as_ptr() 32bit | +//! | `&[u8]` | `u64` | v.len() 32bit << 32 | v.as_ptr() 32bit | +//! | `Vec` | `u64` | v.len() 32bit << 32 | v.as_ptr() 32bit | +//! | `Vec where T: Encode` | `u64` | `let e = v.encode();`

e.len() 32bit << 32 | e.as_ptr() 32bit | +//! | `&[T] where T: Encode` | `u64` | `let e = v.encode();`

e.len() 32bit << 32 | e.as_ptr() 32bit | +//! | `[u8; N]` | `u32` | `v.as_ptr()` | +//! | `*const T` | `u32` | `Identity` | +//! | [`T where T: PassBy`](pass_by::Inner) | Depends on inner | Depends on inner | +//! | [`T where T: PassBy`](pass_by::Codec) | `u64`| v.len() 32bit << 32 | v.as_ptr() 32bit | +//! +//! `Identity` means that the value is converted directly into the corresponding FFI type. #![cfg_attr(not(feature = "std"), no_std)] @@ -237,7 +265,7 @@ pub use codec; pub(crate) mod impls; #[cfg(feature = "std")] pub mod host; -#[cfg(not(feature = "std"))] +#[cfg(any(not(feature = "std"), doc))] pub mod wasm; pub mod pass_by; diff --git a/primitives/runtime-interface/src/pass_by.rs b/primitives/runtime-interface/src/pass_by.rs index 81da2fe7e6a3a..597a0284eee2a 100644 --- a/primitives/runtime-interface/src/pass_by.rs +++ b/primitives/runtime-interface/src/pass_by.rs @@ -14,11 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -//! Provides the [`PassBy`](pass_by::PassBy) trait to simplify the implementation of the +//! Provides the [`PassBy`](PassBy) trait to simplify the implementation of the //! runtime interface traits for custom types. //! -//! [`Codec`](pass_by::Codec), [`Inner`](pass_by::Inner) and [`Enum`](pass_by::Enum) are the -//! provided strategy implementations. +//! [`Codec`], [`Inner`] and [`Enum`] are the provided strategy implementations. use crate::{RIType, util::{unpack_ptr_and_len, pack_ptr_and_len}}; diff --git a/primitives/runtime-interface/src/wasm.rs b/primitives/runtime-interface/src/wasm.rs index 476dfb8a4ce5a..a0801c2bfb5a9 100644 --- a/primitives/runtime-interface/src/wasm.rs +++ b/primitives/runtime-interface/src/wasm.rs @@ -26,7 +26,8 @@ use sp_std::cell::Cell; /// /// It is unsafe behavior to call `Something::into_ffi_value().get()` and take this as input for /// `from_ffi_value`. 
Implementations are safe to assume that the `arg` given to `from_ffi_value` -/// is only generated by the corresponding `host::IntoFFIValue` implementation. +/// is only generated by the corresponding [`host::IntoFFIValue`](crate::host::IntoFFIValue) +/// implementation. pub trait FromFFIValue: Sized + RIType { /// Create `Self` from the given ffi value. fn from_ffi_value(arg: Self::FFIType) -> Self; diff --git a/primitives/runtime-interface/test/Cargo.toml b/primitives/runtime-interface/test/Cargo.toml index 35eb8cb8e1f56..f0a4c0edd4a53 100644 --- a/primitives/runtime-interface/test/Cargo.toml +++ b/primitives/runtime-interface/test/Cargo.toml @@ -7,8 +7,8 @@ publish = false [dependencies] sp-runtime-interface = { version = "2.0.0", path = "../" } -sc-executor = { version = "2.0.0", path = "../../../client/executor" } +sc-executor = { version = "0.8", path = "../../../client/executor" } sp-runtime-interface-test-wasm = { version = "2.0.0", path = "../test-wasm" } -sp-state-machine = { version = "2.0.0", path = "../../../primitives/state-machine" } +sp-state-machine = { version = "0.8", path = "../../../primitives/state-machine" } sp-core = { version = "2.0.0", path = "../../core" } sp-io = { version = "2.0.0", path = "../../io" } diff --git a/primitives/runtime-interface/test/src/lib.rs b/primitives/runtime-interface/test/src/lib.rs index 3cfb589b05519..b209e1f71ce55 100644 --- a/primitives/runtime-interface/test/src/lib.rs +++ b/primitives/runtime-interface/test/src/lib.rs @@ -30,7 +30,6 @@ fn call_wasm_method(method: &str) -> TestExternalities { let mut ext_ext = ext.ext(); sc_executor::call_in_wasm::< - _, ( HF, sp_io::SubstrateHostFunctions, diff --git a/primitives/runtime/src/generic/digest.rs b/primitives/runtime/src/generic/digest.rs index 6653cfb8a86b3..fef02d4f00959 100644 --- a/primitives/runtime/src/generic/digest.rs +++ b/primitives/runtime/src/generic/digest.rs @@ -23,7 +23,7 @@ use sp_std::prelude::*; use crate::ConsensusEngineId; use 
crate::codec::{Decode, Encode, Input, Error}; -use sp_core::RuntimeDebug; +use sp_core::{ChangesTrieConfiguration, RuntimeDebug}; /// Generic header digest. #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] @@ -97,10 +97,32 @@ pub enum DigestItem { /// by runtimes. Seal(ConsensusEngineId, Vec), + /// Digest item that contains signal from changes tries manager to the + /// native code. + ChangesTrieSignal(ChangesTrieSignal), + /// Some other thing. Unsupported and experimental. Other(Vec), } +/// Available changes trie signals. +#[derive(PartialEq, Eq, Clone, Encode, Decode)] +#[cfg_attr(feature = "std", derive(Debug))] +pub enum ChangesTrieSignal { + /// New changes trie configuration is enacted, starting from **next block**. + /// + /// The block that emits this signal will contain changes trie (CT) that covers + /// blocks range [BEGIN; current block], where BEGIN is (order matters): + /// - LAST_TOP_LEVEL_DIGEST_BLOCK+1 if top level digest CT has ever been created + /// using current configuration AND the last top level digest CT has been created + /// at block LAST_TOP_LEVEL_DIGEST_BLOCK; + /// - LAST_CONFIGURATION_CHANGE_BLOCK+1 if there has been CT configuration change + /// before and the last configuration change happened at block + /// LAST_CONFIGURATION_CHANGE_BLOCK; + /// - 1 otherwise. + NewConfiguration(Option), +} + #[cfg(feature = "std")] impl serde::Serialize for DigestItem { fn serialize(&self, seq: S) -> Result where S: serde::Serializer { @@ -141,6 +163,9 @@ pub enum DigestItemRef<'a, Hash: 'a> { /// Put a Seal on it. This is only used by native code, and is never seen /// by runtimes. Seal(&'a ConsensusEngineId, &'a Vec), + /// Digest item that contains signal from changes tries manager to the + /// native code. + ChangesTrieSignal(&'a ChangesTrieSignal), /// Any 'non-system' digest item, opaque to the native code. 
Other(&'a Vec), } @@ -152,11 +177,12 @@ pub enum DigestItemRef<'a, Hash: 'a> { #[repr(u32)] #[derive(Encode, Decode)] pub enum DigestItemType { + Other = 0, ChangesTrieRoot = 2, - PreRuntime = 6, Consensus = 4, Seal = 5, - Other = 0, + PreRuntime = 6, + ChangesTrieSignal = 7, } /// Type of a digest item that contains raw data; this also names the consensus engine ID where @@ -181,6 +207,7 @@ impl DigestItem { DigestItem::PreRuntime(ref v, ref s) => DigestItemRef::PreRuntime(v, s), DigestItem::Consensus(ref v, ref s) => DigestItemRef::Consensus(v, s), DigestItem::Seal(ref v, ref s) => DigestItemRef::Seal(v, s), + DigestItem::ChangesTrieSignal(ref s) => DigestItemRef::ChangesTrieSignal(s), DigestItem::Other(ref v) => DigestItemRef::Other(v), } } @@ -205,6 +232,11 @@ impl DigestItem { self.dref().as_seal() } + /// Returns `Some` if the entry is the `ChangesTrieSignal` entry. + pub fn as_changes_trie_signal(&self) -> Option<&ChangesTrieSignal> { + self.dref().as_changes_trie_signal() + } + /// Returns Some if `self` is a `DigestItem::Other`. pub fn as_other(&self) -> Option<&[u8]> { match *self { @@ -253,6 +285,9 @@ impl Decode for DigestItem { let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; Ok(DigestItem::Seal(vals.0, vals.1)) }, + DigestItemType::ChangesTrieSignal => Ok(DigestItem::ChangesTrieSignal( + Decode::decode(input)?, + )), DigestItemType::Other => Ok(DigestItem::Other( Decode::decode(input)?, )), @@ -293,6 +328,14 @@ impl<'a, Hash> DigestItemRef<'a, Hash> { } } + /// Cast this digest item into `ChangesTrieSignal`. 
+ pub fn as_changes_trie_signal(&self) -> Option<&'a ChangesTrieSignal> { + match *self { + DigestItemRef::ChangesTrieSignal(ref changes_trie_signal) => Some(changes_trie_signal), + _ => None, + } + } + /// Cast this digest item into `PreRuntime` pub fn as_other(&self) -> Option<&'a [u8]> { match *self { @@ -342,6 +385,10 @@ impl<'a, Hash: Encode> Encode for DigestItemRef<'a, Hash> { DigestItemType::PreRuntime.encode_to(&mut v); (val, data).encode_to(&mut v); }, + DigestItemRef::ChangesTrieSignal(changes_trie_signal) => { + DigestItemType::ChangesTrieSignal.encode_to(&mut v); + changes_trie_signal.encode_to(&mut v); + }, DigestItemRef::Other(val) => { DigestItemType::Other.encode_to(&mut v); val.encode_to(&mut v); @@ -352,6 +399,15 @@ impl<'a, Hash: Encode> Encode for DigestItemRef<'a, Hash> { } } +impl ChangesTrieSignal { + /// Try to cast this signal to NewConfiguration. + pub fn as_new_configuration(&self) -> Option<&Option> { + match self { + ChangesTrieSignal::NewConfiguration(config) => Some(config), + } + } +} + impl<'a, Hash: Encode> codec::EncodeLike for DigestItemRef<'a, Hash> {} #[cfg(test)] diff --git a/primitives/runtime/src/generic/mod.rs b/primitives/runtime/src/generic/mod.rs index 2416bb70470be..5e9928ba1909a 100644 --- a/primitives/runtime/src/generic/mod.rs +++ b/primitives/runtime/src/generic/mod.rs @@ -33,7 +33,7 @@ pub use self::checked_extrinsic::CheckedExtrinsic; pub use self::header::Header; pub use self::block::{Block, SignedBlock, BlockId}; pub use self::digest::{ - Digest, DigestItem, DigestItemRef, OpaqueDigestItemId + Digest, DigestItem, DigestItemRef, OpaqueDigestItemId, ChangesTrieSignal, }; use crate::codec::Encode; diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index c010ea0456d9e..6033f221a1f5f 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -53,6 +53,9 @@ pub mod testing; pub mod traits; pub mod transaction_validity; pub mod random_number_generator; +mod 
runtime_string; + +pub use crate::runtime_string::*; /// Re-export these since they're only "kind of" generic. pub use generic::{DigestItem, Digest}; @@ -92,27 +95,6 @@ impl TypeId for ModuleId { const TYPE_ID: [u8; 4] = *b"modl"; } -/// A String that is a `&'static str` on `no_std` and a `Cow<'static, str>` on `std`. -#[cfg(feature = "std")] -pub type RuntimeString = std::borrow::Cow<'static, str>; -/// A String that is a `&'static str` on `no_std` and a `Cow<'static, str>` on `std`. -#[cfg(not(feature = "std"))] -pub type RuntimeString = &'static str; - -/// Create a const [`RuntimeString`]. -#[cfg(feature = "std")] -#[macro_export] -macro_rules! create_runtime_str { - ( $y:expr ) => {{ std::borrow::Cow::Borrowed($y) }} -} - -/// Create a const [`RuntimeString`]. -#[cfg(not(feature = "std"))] -#[macro_export] -macro_rules! create_runtime_str { - ( $y:expr ) => {{ $y }} -} - #[cfg(feature = "std")] pub use serde::{Serialize, Deserialize, de::DeserializeOwned}; use crate::traits::IdentifyAccount; diff --git a/primitives/runtime/src/runtime_string.rs b/primitives/runtime/src/runtime_string.rs new file mode 100644 index 0000000000000..e7ee927e087be --- /dev/null +++ b/primitives/runtime/src/runtime_string.rs @@ -0,0 +1,117 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . 
+ +use codec::{Encode, Decode}; +use sp_core::RuntimeDebug; +use sp_std::vec::Vec; + +/// A string that wraps a `&'static str` in the runtime and `String`/`Vec` on decode. +#[derive(Eq, RuntimeDebug, Clone)] +pub enum RuntimeString { + /// The borrowed mode that wraps a `&'static str`. + Borrowed(&'static str), + /// The owned mode that wraps a `String`. + #[cfg(feature = "std")] + Owned(String), + /// The owned mode that wraps a `Vec`. + #[cfg(not(feature = "std"))] + Owned(Vec), +} + +impl From<&'static str> for RuntimeString { + fn from(data: &'static str) -> Self { + Self::Borrowed(data) + } +} + +#[cfg(feature = "std")] +impl From for String { + fn from(string: RuntimeString) -> Self { + match string { + RuntimeString::Borrowed(data) => data.to_owned(), + RuntimeString::Owned(data) => data, + } + } +} + +impl Default for RuntimeString { + fn default() -> Self { + Self::Borrowed(Default::default()) + } +} + +impl PartialEq for RuntimeString { + fn eq(&self, other: &Self) -> bool { + self.as_ref() == other.as_ref() + } +} + +impl AsRef<[u8]> for RuntimeString { + fn as_ref(&self) -> &[u8] { + match self { + Self::Borrowed(val) => val.as_ref(), + Self::Owned(val) => val.as_ref(), + } + } +} + +impl Encode for RuntimeString { + fn encode(&self) -> Vec { + match self { + Self::Borrowed(val) => val.encode(), + Self::Owned(val) => val.encode(), + } + } +} + +impl Decode for RuntimeString { + fn decode(value: &mut I) -> Result { + Decode::decode(value).map(Self::Owned) + } +} + +#[cfg(feature = "std")] +impl std::fmt::Display for RuntimeString { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + Self::Borrowed(val) => write!(f, "{}", val), + Self::Owned(val) => write!(f, "{}", val), + } + } +} + +#[cfg(feature = "std")] +impl serde::Serialize for RuntimeString { + fn serialize(&self, serializer: S) -> Result { + match self { + Self::Borrowed(val) => val.serialize(serializer), + Self::Owned(val) => val.serialize(serializer), + } + } +} 
+ +#[cfg(feature = "std")] +impl<'de> serde::Deserialize<'de> for RuntimeString { + fn deserialize>(de: D) -> Result { + String::deserialize(de).map(Self::Owned) + } +} + +/// Create a const [`RuntimeString`]. +#[macro_export] +macro_rules! create_runtime_str { + ( $y:expr ) => {{ $crate::RuntimeString::Borrowed($y) }} +} diff --git a/primitives/sandbox/Cargo.toml b/primitives/sandbox/Cargo.toml index ff3b7662cd5a5..5d161a6deb9da 100755 --- a/primitives/sandbox/Cargo.toml +++ b/primitives/sandbox/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-sandbox" -version = "2.0.0" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index 1424845ea6cc9..d390471aca2d6 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-state-machine" -version = "2.0.0" +version = "0.8.0" authors = ["Parity Technologies "] description = "Substrate State Machine" edition = "2018" @@ -9,7 +9,7 @@ edition = "2018" log = "0.4.8" parking_lot = "0.9.0" hash-db = "0.15.2" -trie-db = "0.18.1" +trie-db = "0.19.2" trie-root = "0.15.2" sp-trie = { version = "2.0.0", path = "../trie" } sp-core = { version = "2.0.0", path = "../core" } @@ -17,7 +17,7 @@ sp-panic-handler = { version = "2.0.0", path = "../panic-handler" } codec = { package = "parity-scale-codec", version = "1.0.0" } num-traits = "0.2.8" rand = "0.7.2" -sp-externalities = { version = "2.0.0", path = "../externalities" } +sp-externalities = { version = "0.8.0", path = "../externalities" } [dev-dependencies] hex-literal = "0.2.1" diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 75733d68b3ffa..4ef9b970ae21d 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -26,7 +26,7 @@ use sp_trie::{TrieMut, MemoryDB, trie_types::TrieDBMut}; use crate::{ trie_backend::TrieBackend, 
trie_backend_essence::TrieBackendStorage, - UsageInfo, + UsageInfo, StorageKey, StorageValue, StorageCollection, }; /// A state backend is used to read state data and can have changes committed @@ -44,7 +44,7 @@ pub trait Backend: std::fmt::Debug { type TrieBackendStorage: TrieBackendStorage; /// Get keyed storage or None if there is nothing associated. - fn storage(&self, key: &[u8]) -> Result>, Self::Error>; + fn storage(&self, key: &[u8]) -> Result, Self::Error>; /// Get keyed storage value hash or None if there is nothing associated. fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { @@ -57,7 +57,7 @@ pub trait Backend: std::fmt::Debug { storage_key: &[u8], child_info: ChildInfo, key: &[u8], - ) -> Result>, Self::Error>; + ) -> Result, Self::Error>; /// Get child keyed storage value hash or None if there is nothing associated. fn child_storage_hash( @@ -85,7 +85,7 @@ pub trait Backend: std::fmt::Debug { } /// Return the next key in storage in lexicographic order or `None` if there is no value. - fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error>; + fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error>; /// Return the next key in child storage in lexicographic order or `None` if there is no value. fn next_child_storage_key( @@ -93,7 +93,7 @@ pub trait Backend: std::fmt::Debug { storage_key: &[u8], child_info: ChildInfo, key: &[u8] - ) -> Result>, Self::Error>; + ) -> Result, Self::Error>; /// Retrieve all entries keys of child storage and call `f` for each of those keys. fn for_keys_in_child_storage( @@ -129,7 +129,7 @@ pub trait Backend: std::fmt::Debug { /// Does not include child storage updates. 
fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) where - I: IntoIterator, Option>)>, + I: IntoIterator)>, H::Out: Ord; /// Calculate the child storage root, with given delta over what is already stored in @@ -142,14 +142,14 @@ pub trait Backend: std::fmt::Debug { delta: I, ) -> (H::Out, bool, Self::Transaction) where - I: IntoIterator, Option>)>, + I: IntoIterator)>, H::Out: Ord; /// Get all key/value pairs into a Vec. - fn pairs(&self) -> Vec<(Vec, Vec)>; + fn pairs(&self) -> Vec<(StorageKey, StorageValue)>; /// Get all keys with given prefix - fn keys(&self, prefix: &[u8]) -> Vec> { + fn keys(&self, prefix: &[u8]) -> Vec { let mut all = Vec::new(); self.for_keys_with_prefix(prefix, |k| all.push(k.to_vec())); all @@ -161,7 +161,7 @@ pub trait Backend: std::fmt::Debug { storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], - ) -> Vec> { + ) -> Vec { let mut all = Vec::new(); self.for_child_keys_with_prefix(storage_key, child_info, prefix, |k| all.push(k.to_vec())); all @@ -181,9 +181,9 @@ pub trait Backend: std::fmt::Debug { child_deltas: I2) -> (H::Out, Self::Transaction) where - I1: IntoIterator, Option>)>, - I2i: IntoIterator, Option>)>, - I2: IntoIterator, I2i, OwnedChildInfo)>, + I1: IntoIterator)>, + I2i: IntoIterator)>, + I2: IntoIterator, H::Out: Ord + Encode, { let mut txs: Self::Transaction = Default::default(); @@ -220,7 +220,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { type Transaction = T::Transaction; type TrieBackendStorage = T::TrieBackendStorage; - fn storage(&self, key: &[u8]) -> Result>, Self::Error> { + fn storage(&self, key: &[u8]) -> Result, Self::Error> { (*self).storage(key) } @@ -229,7 +229,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { storage_key: &[u8], child_info: ChildInfo, key: &[u8], - ) -> Result>, Self::Error> { + ) -> Result, Self::Error> { (*self).child_storage(storage_key, child_info, key) } @@ -242,7 +242,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { 
(*self).for_keys_in_child_storage(storage_key, child_info, f) } - fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { + fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { (*self).next_storage_key(key) } @@ -251,7 +251,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { storage_key: &[u8], child_info: ChildInfo, key: &[u8], - ) -> Result>, Self::Error> { + ) -> Result, Self::Error> { (*self).next_child_storage_key(storage_key, child_info, key) } @@ -271,7 +271,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) where - I: IntoIterator, Option>)>, + I: IntoIterator)>, H::Out: Ord, { (*self).storage_root(delta) @@ -284,13 +284,13 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { delta: I, ) -> (H::Out, bool, Self::Transaction) where - I: IntoIterator, Option>)>, + I: IntoIterator)>, H::Out: Ord, { (*self).child_storage_root(storage_key, child_info, delta) } - fn pairs(&self) -> Vec<(Vec, Vec)> { + fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { (*self).pairs() } @@ -316,8 +316,8 @@ impl Consolidate for () { } impl Consolidate for Vec<( - Option<(Vec, OwnedChildInfo)>, - Vec<(Vec, Option>)>, + Option<(StorageKey, OwnedChildInfo)>, + StorageCollection, )> { fn consolidate(&mut self, mut other: Self) { self.append(&mut other); @@ -334,7 +334,7 @@ impl> Consolidate for sp_trie::GenericMem pub(crate) fn insert_into_memory_db(mdb: &mut MemoryDB, input: I) -> Option where H: Hasher, - I: IntoIterator, Vec)>, + I: IntoIterator, { let mut root = ::Out::default(); { diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index d06aedfc4b6ee..d905657737a8a 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -19,7 +19,7 @@ use std::{ collections::BTreeMap, any::{TypeId, Any}, iter::FromIterator, ops::Bound }; -use crate::{Backend, InMemoryBackend}; +use crate::{Backend, InMemoryBackend, 
StorageKey, StorageValue}; use hash_db::Hasher; use sp_trie::{TrieConfiguration, default_child_trie_root}; use sp_trie::trie_types::Layout; @@ -46,7 +46,7 @@ impl BasicExternalities { } /// Insert key/value - pub fn insert(&mut self, k: Vec, v: Vec) -> Option> { + pub fn insert(&mut self, k: StorageKey, v: StorageValue) -> Option { self.inner.top.insert(k, v) } @@ -89,8 +89,8 @@ impl PartialEq for BasicExternalities { } } -impl FromIterator<(Vec, Vec)> for BasicExternalities { - fn from_iter, Vec)>>(iter: I) -> Self { +impl FromIterator<(StorageKey, StorageValue)> for BasicExternalities { + fn from_iter>(iter: I) -> Self { let mut t = Self::default(); t.inner.top.extend(iter); t @@ -101,8 +101,8 @@ impl Default for BasicExternalities { fn default() -> Self { Self::new(Default::default()) } } -impl From, Vec>> for BasicExternalities { - fn from(hashmap: BTreeMap, Vec>) -> Self { +impl From> for BasicExternalities { + fn from(hashmap: BTreeMap) -> Self { BasicExternalities { inner: Storage { top: hashmap, children: Default::default(), @@ -111,7 +111,7 @@ impl From, Vec>> for BasicExternalities { } impl Externalities for BasicExternalities { - fn storage(&self, key: &[u8]) -> Option> { + fn storage(&self, key: &[u8]) -> Option { self.inner.top.get(key).cloned() } @@ -119,7 +119,7 @@ impl Externalities for BasicExternalities { self.storage(key).map(|v| Blake2Hasher::hash(&v).encode()) } - fn original_storage(&self, key: &[u8]) -> Option> { + fn original_storage(&self, key: &[u8]) -> Option { self.storage(key) } @@ -132,7 +132,7 @@ impl Externalities for BasicExternalities { storage_key: ChildStorageKey, _child_info: ChildInfo, key: &[u8], - ) -> Option> { + ) -> Option { self.inner.children.get(storage_key.as_ref()).and_then(|child| child.data.get(key)).cloned() } @@ -159,11 +159,11 @@ impl Externalities for BasicExternalities { storage_key: ChildStorageKey, child_info: ChildInfo, key: &[u8], - ) -> Option> { + ) -> Option { Externalities::child_storage(self, 
storage_key, child_info, key) } - fn next_storage_key(&self, key: &[u8]) -> Option> { + fn next_storage_key(&self, key: &[u8]) -> Option { let range = (Bound::Excluded(key), Bound::Unbounded); self.inner.top.range::<[u8], _>(range).next().map(|(k, _)| k).cloned() } @@ -173,13 +173,13 @@ impl Externalities for BasicExternalities { storage_key: ChildStorageKey, _child_info: ChildInfo, key: &[u8], - ) -> Option> { + ) -> Option { let range = (Bound::Excluded(key), Bound::Unbounded); self.inner.children.get(storage_key.as_ref()) .and_then(|child| child.data.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()) } - fn place_storage(&mut self, key: Vec, maybe_value: Option>) { + fn place_storage(&mut self, key: StorageKey, maybe_value: Option) { if is_child_storage_key(&key) { warn!(target: "trie", "Refuse to set child storage key via main storage"); return; @@ -195,8 +195,8 @@ impl Externalities for BasicExternalities { &mut self, storage_key: ChildStorageKey, child_info: ChildInfo, - key: Vec, - value: Option>, + key: StorageKey, + value: Option, ) { let child_map = self.inner.children.entry(storage_key.into_owned()) .or_insert_with(|| StorageChild { diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index ded8e4f6d19ed..c731d4104b260 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -21,13 +21,17 @@ use std::collections::btree_map::Entry; use codec::{Decode, Encode}; use hash_db::Hasher; use num_traits::One; -use crate::backend::Backend; -use crate::overlayed_changes::OverlayedChanges; -use crate::trie_backend_essence::TrieBackendEssence; -use crate::changes_trie::build_iterator::digest_build_iterator; -use crate::changes_trie::input::{InputKey, InputPair, DigestIndex, ExtrinsicIndex}; -use crate::changes_trie::{AnchorBlockId, ConfigurationRange, Storage, BlockNumber}; -use crate::changes_trie::input::ChildIndex; +use crate::{ 
+ StorageKey, + backend::Backend, + overlayed_changes::OverlayedChanges, + trie_backend_essence::TrieBackendEssence, + changes_trie::{ + AnchorBlockId, ConfigurationRange, Storage, BlockNumber, + build_iterator::digest_build_iterator, + input::{InputKey, InputPair, DigestIndex, ExtrinsicIndex, ChildIndex}, + }, +}; /// Prepare input pairs for building a changes trie of given block. /// @@ -101,7 +105,7 @@ fn prepare_extrinsics_input<'a, B, H, Number>( Number: BlockNumber, { - let mut children_keys = BTreeSet::>::new(); + let mut children_keys = BTreeSet::::new(); let mut children_result = BTreeMap::new(); for (storage_key, _) in changes.prospective.children.iter() .chain(changes.committed.children.iter()) { @@ -126,7 +130,7 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( backend: &'a B, block: &Number, changes: &'a OverlayedChanges, - storage_key: Option>, + storage_key: Option, ) -> Result> + 'a, String> where B: Backend, @@ -231,7 +235,7 @@ fn prepare_digest_input<'a, H, Number>( let trie_root = storage.root(parent, digest_build_block.clone())?; let trie_root = trie_root.ok_or_else(|| format!("No changes trie root for block {}", digest_build_block.clone()))?; - let insert_to_map = |map: &mut BTreeMap<_,_>, key: Vec| { + let insert_to_map = |map: &mut BTreeMap<_,_>, key: StorageKey| { match map.entry(key.clone()) { Entry::Vacant(entry) => { entry.insert((DigestIndex { @@ -277,7 +281,7 @@ fn prepare_digest_input<'a, H, Number>( return Ok((map, child_map)); } - let mut children_roots = BTreeMap::, _>::new(); + let mut children_roots = BTreeMap::::new(); { let trie_storage = TrieBackendEssence::<_, H>::new( crate::changes_trie::TrieBackendStorageAdapter(storage), @@ -356,7 +360,6 @@ mod test { OverlayedChanges, Configuration, ) { - let config = Configuration { digest_interval: 4, digest_levels: 2 }; let backend: InMemoryBackend<_> = vec![ (vec![100], vec![255]), (vec![101], vec![255]), @@ -465,8 +468,9 @@ mod test { ].into_iter().collect(), 
CHILD_INFO_1.to_owned())), ].into_iter().collect(), }, - changes_trie_config: Some(config.clone()), + collect_extrinsics: true, }; + let config = Configuration { digest_interval: 4, digest_levels: 2 }; (backend, storage, changes, config) } @@ -553,7 +557,6 @@ mod test { InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2]), ]), ]); - } test_with_zero(0); @@ -597,7 +600,6 @@ mod test { InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16, key: vec![100] }, vec![0, 2]), ]), ]); - } test_with_zero(0); @@ -706,8 +708,7 @@ mod test { #[test] fn cache_is_used_when_changes_trie_is_built() { - let (backend, mut storage, changes, _) = prepare_for_build(0); - let config = changes.changes_trie_config.as_ref().unwrap(); + let (backend, mut storage, changes, config) = prepare_for_build(0); let parent = AnchorBlockId { hash: Default::default(), number: 15 }; // override some actual values from storage with values from the cache @@ -770,6 +771,5 @@ mod test { InputPair::DigestIndex(DigestIndex { block: 16u64, key: vec![106] }, vec![4]), ], ); - } } diff --git a/primitives/state-machine/src/changes_trie/build_cache.rs b/primitives/state-machine/src/changes_trie/build_cache.rs index 76e3652e16681..9d0dbb4c1f310 100644 --- a/primitives/state-machine/src/changes_trie/build_cache.rs +++ b/primitives/state-machine/src/changes_trie/build_cache.rs @@ -18,6 +18,8 @@ use std::collections::{HashMap, HashSet}; +use crate::StorageKey; + /// Changes trie build cache. /// /// Helps to avoid read of changes tries from the database when digest trie @@ -36,7 +38,7 @@ pub struct BuildCache { /// The `Option>` in inner `HashMap` stands for the child storage key. /// If it is `None`, then the `HashSet` contains keys changed in top-level storage. /// If it is `Some`, then the `HashSet` contains keys changed in child storage, identified by the key. 
- changed_keys: HashMap>, HashSet>>>, + changed_keys: HashMap, HashSet>>, } /// The action to perform when block-with-changes-trie is imported. @@ -54,7 +56,7 @@ pub struct CachedBuildData { block: N, trie_root: H, digest_input_blocks: Vec, - changed_keys: HashMap>, HashSet>>, + changed_keys: HashMap, HashSet>, } /// The action to perform when block-with-changes-trie is imported. @@ -70,7 +72,7 @@ pub(crate) enum IncompleteCacheAction { #[derive(Debug, PartialEq)] pub(crate) struct IncompleteCachedBuildData { digest_input_blocks: Vec, - changed_keys: HashMap>, HashSet>>, + changed_keys: HashMap, HashSet>, } impl BuildCache @@ -87,7 +89,7 @@ impl BuildCache } /// Get cached changed keys for changes trie with given root. - pub fn get(&self, root: &H) -> Option<&HashMap>, HashSet>>> { + pub fn get(&self, root: &H) -> Option<&HashMap, HashSet>> { self.changed_keys.get(&root) } @@ -96,7 +98,7 @@ impl BuildCache pub fn with_changed_keys( &self, root: &H, - functor: &mut dyn FnMut(&HashMap>, HashSet>>), + functor: &mut dyn FnMut(&HashMap, HashSet>), ) -> bool { match self.changed_keys.get(&root) { Some(changed_keys) => { @@ -162,8 +164,8 @@ impl IncompleteCacheAction { /// Insert changed keys of given storage into cached data. 
pub(crate) fn insert( self, - storage_key: Option>, - changed_keys: HashSet>, + storage_key: Option, + changed_keys: HashSet, ) -> Self { match self { IncompleteCacheAction::CacheBuildData(build_data) => @@ -198,8 +200,8 @@ impl IncompleteCachedBuildData { fn insert( mut self, - storage_key: Option>, - changed_keys: HashSet>, + storage_key: Option, + changed_keys: HashSet, ) -> Self { self.changed_keys.insert(storage_key, changed_keys); self @@ -259,4 +261,4 @@ mod tests { assert_eq!(cache.changed_keys.len(), 0); } -} \ No newline at end of file +} diff --git a/primitives/state-machine/src/changes_trie/input.rs b/primitives/state-machine/src/changes_trie/input.rs index 9b3341ca5842b..4a1420f8486f9 100644 --- a/primitives/state-machine/src/changes_trie/input.rs +++ b/primitives/state-machine/src/changes_trie/input.rs @@ -17,7 +17,10 @@ //! Different types of changes trie input pairs. use codec::{Decode, Encode, Input, Output, Error}; -use crate::changes_trie::BlockNumber; +use crate::{ + StorageKey, StorageValue, + changes_trie::BlockNumber +}; /// Key of { changed key => set of extrinsic indices } mapping. #[derive(Clone, Debug, PartialEq, Eq)] @@ -25,7 +28,7 @@ pub struct ExtrinsicIndex { /// Block at which this key has been inserted in the trie. pub block: Number, /// Storage key this node is responsible for. - pub key: Vec, + pub key: StorageKey, } /// Value of { changed key => set of extrinsic indices } mapping. @@ -37,7 +40,7 @@ pub struct DigestIndex { /// Block at which this key has been inserted in the trie. pub block: Number, /// Storage key this node is responsible for. - pub key: Vec, + pub key: StorageKey, } /// Key of { childtrie key => Childchange trie } mapping. @@ -46,7 +49,7 @@ pub struct ChildIndex { /// Block at which this key has been inserted in the trie. pub block: Number, /// Storage key this node is responsible for. - pub storage_key: Vec, + pub storage_key: StorageKey, } /// Value of { changed key => block/digest block numbers } mapping. 
@@ -89,8 +92,8 @@ impl InputPair { } } -impl Into<(Vec, Vec)> for InputPair { - fn into(self) -> (Vec, Vec) { +impl Into<(StorageKey, StorageValue)> for InputPair { + fn into(self) -> (StorageKey, StorageValue) { match self { InputPair::ExtrinsicIndex(key, value) => (key.encode(), value.encode()), InputPair::DigestIndex(key, value) => (key.encode(), value.encode()), diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index cb19d157dd80a..12074b7261aa5 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -63,20 +63,25 @@ pub use self::changes_iterator::{ key_changes, key_changes_proof, key_changes_proof_check, key_changes_proof_check_with_db, }; -pub use self::prune::{prune, oldest_non_pruned_trie}; +pub use self::prune::prune; use std::collections::{HashMap, HashSet}; use std::convert::TryInto; use hash_db::{Hasher, Prefix}; -use crate::backend::Backend; use num_traits::{One, Zero}; use codec::{Decode, Encode}; use sp_core; -use crate::changes_trie::build::prepare_input; -use crate::changes_trie::build_cache::{IncompleteCachedBuildData, IncompleteCacheAction}; -use crate::overlayed_changes::OverlayedChanges; use sp_trie::{MemoryDB, DBValue, TrieMut}; use sp_trie::trie_types::TrieDBMut; +use crate::{ + StorageKey, + backend::Backend, + overlayed_changes::OverlayedChanges, + changes_trie::{ + build::prepare_input, + build_cache::{IncompleteCachedBuildData, IncompleteCacheAction}, + }, +}; /// Changes that are made outside of extrinsics are marked with this index; pub const NO_EXTRINSIC_INDEX: u32 = 0xffffffff; @@ -121,6 +126,18 @@ pub struct AnchorBlockId { pub number: Number, } +/// Changes tries state at some block. +pub struct State<'a, H, Number> { + /// Configuration that is active at given block. + pub config: Configuration, + /// Configuration activation block number. 
Zero if it is the first coonfiguration on the chain, + /// or number of the block that have emit NewConfiguration signal (thus activating configuration + /// starting from the **next** block). + pub zero: Number, + /// Underlying changes tries storage reference. + pub storage: &'a dyn Storage, +} + /// Changes trie storage. Provides access to trie roots and trie nodes. pub trait RootsStorage: Send + Sync { /// Resolve hash of the block into anchor. @@ -139,7 +156,7 @@ pub trait Storage: RootsStorage { fn with_cached_changed_keys( &self, root: &H::Out, - functor: &mut dyn FnMut(&HashMap>, HashSet>>), + functor: &mut dyn FnMut(&HashMap, HashSet>), ) -> bool; /// Get a trie node. fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; @@ -170,14 +187,43 @@ pub struct ConfigurationRange<'a, N> { pub end: Option, } +impl<'a, H, Number> State<'a, H, Number> { + /// Create state with given config and storage. + pub fn new( + config: Configuration, + zero: Number, + storage: &'a dyn Storage, + ) -> Self { + Self { + config, + zero, + storage, + } + } +} + +impl<'a, H, Number: Clone> Clone for State<'a, H, Number> { + fn clone(&self) -> Self { + State { + config: self.config.clone(), + zero: self.zero.clone(), + storage: self.storage, + } + } +} + +/// Create state where changes tries are disabled. +pub fn disabled_state<'a, H, Number>() -> Option> { + None +} + /// Compute the changes trie root and transaction for given block. /// Returns Err(()) if unknown `parent_hash` has been passed. /// Returns Ok(None) if there's no data to perform computation. -/// Panics if background storage returns an error (and `panic_on_storage_error` is `true`) OR -/// if insert to MemoryDB fails. -pub fn build_changes_trie<'a, B: Backend, S: Storage, H: Hasher, Number: BlockNumber>( +/// Panics if background storage returns an error OR if insert to MemoryDB fails. 
+pub fn build_changes_trie<'a, B: Backend, H: Hasher, Number: BlockNumber>( backend: &B, - storage: Option<&'a S>, + state: Option<&'a State<'a, H, Number>>, changes: &OverlayedChanges, parent_hash: H::Out, panic_on_storage_error: bool, @@ -185,11 +231,6 @@ pub fn build_changes_trie<'a, B: Backend, S: Storage, H: Hasher, N where H::Out: Ord + 'static + Encode, { - let (storage, config) = match (storage, changes.changes_trie_config.as_ref()) { - (Some(storage), Some(config)) => (storage, config), - _ => return Ok(None), - }; - /// Panics when `res.is_err() && panic`, otherwise it returns `Err(())` on an error. fn maybe_panic( res: std::result::Result, @@ -203,23 +244,35 @@ pub fn build_changes_trie<'a, B: Backend, S: Storage, H: Hasher, N }) } - // FIXME: remove this in https://github.com/paritytech/substrate/pull/3201 - let config = ConfigurationRange { - config, - zero: Zero::zero(), - end: None, + // when storage isn't provided, changes tries aren't created + let state = match state { + Some(state) => state, + None => return Ok(None), }; // build_anchor error should not be considered fatal - let parent = storage.build_anchor(parent_hash).map_err(|_| ())?; + let parent = state.storage.build_anchor(parent_hash).map_err(|_| ())?; let block = parent.number.clone() + One::one(); + // prepare configuration range - we already know zero block. 
Current block may be the end block if configuration + // has been changed in this block + let is_config_changed = match changes.storage(sp_core::storage::well_known_keys::CHANGES_TRIE_CONFIG) { + Some(Some(new_config)) => new_config != &state.config.encode()[..], + Some(None) => true, + None => false, + }; + let config_range = ConfigurationRange { + config: &state.config, + zero: state.zero.clone(), + end: if is_config_changed { Some(block.clone()) } else { None }, + }; + // storage errors are considered fatal (similar to situations when runtime fetches values from storage) let (input_pairs, child_input_pairs, digest_input_blocks) = maybe_panic( prepare_input::( backend, - storage, - config.clone(), + state.storage, + config_range.clone(), changes, &parent, ), @@ -227,7 +280,7 @@ pub fn build_changes_trie<'a, B: Backend, S: Storage, H: Hasher, N )?; // prepare cached data - let mut cache_action = prepare_cached_build_data(config, block.clone()); + let mut cache_action = prepare_cached_build_data(config_range, block.clone()); let needs_changed_keys = cache_action.collects_changed_keys(); cache_action = cache_action.set_digest_input_blocks(digest_input_blocks); diff --git a/primitives/state-machine/src/changes_trie/prune.rs b/primitives/state-machine/src/changes_trie/prune.rs index e16daea15821e..f6be3223ae9f8 100644 --- a/primitives/state-machine/src/changes_trie/prune.rs +++ b/primitives/state-machine/src/changes_trie/prune.rs @@ -19,52 +19,26 @@ use hash_db::Hasher; use sp_trie::Recorder; use log::warn; -use num_traits::{One, Zero}; +use num_traits::One; use crate::proving_backend::ProvingBackendRecorder; use crate::trie_backend_essence::TrieBackendEssence; -use crate::changes_trie::{AnchorBlockId, Configuration, Storage, BlockNumber}; +use crate::changes_trie::{AnchorBlockId, Storage, BlockNumber}; use crate::changes_trie::storage::TrieBackendAdapter; use crate::changes_trie::input::{ChildIndex, InputKey}; use codec::{Decode, Codec}; -/// Get number of oldest 
block for which changes trie is not pruned -/// given changes trie configuration, pruning parameter and number of -/// best finalized block. -pub fn oldest_non_pruned_trie( - config: &Configuration, - min_blocks_to_keep: Number, - best_finalized_block: Number, -) -> Number { - let max_digest_interval = config.max_digest_interval(); - let best_finalized_block_rem = best_finalized_block.clone() % max_digest_interval.into(); - let max_digest_block = best_finalized_block - best_finalized_block_rem; - match pruning_range(config, min_blocks_to_keep, max_digest_block) { - Some((_, last_pruned_block)) => last_pruned_block + One::one(), - None => One::one(), - } -} - /// Prune obsolete changes tries. Pruning happens at the same block, where highest /// level digest is created. Pruning guarantees to save changes tries for last /// `min_blocks_to_keep` blocks. We only prune changes tries at `max_digest_interval` /// ranges. -/// Returns MemoryDB that contains all deleted changes tries nodes. -pub fn prune, H: Hasher, Number: BlockNumber, F: FnMut(H::Out)>( - config: &Configuration, - storage: &S, - min_blocks_to_keep: Number, +pub fn prune( + storage: &dyn Storage, + first: Number, + last: Number, current_block: &AnchorBlockId, mut remove_trie_node: F, ) where H::Out: Codec { - - // select range for pruning - let (first, last) = match pruning_range(config, min_blocks_to_keep, current_block.number.clone()) { - Some((first, last)) => (first, last), - None => return, - }; - // delete changes trie for every block in range - // FIXME: limit `max_digest_interval` so that this cycle won't involve huge ranges let mut block = first; loop { if block >= last.clone() + One::one() { @@ -112,8 +86,8 @@ pub fn prune, H: Hasher, Number: BlockNumber, F: FnMut(H:: } // Prune a trie. 
-fn prune_trie, H: Hasher, Number: BlockNumber, F: FnMut(H::Out)>( - storage: &S, +fn prune_trie( + storage: &dyn Storage, root: H::Out, remove_trie_node: &mut F, ) where H::Out: Codec { @@ -136,100 +110,26 @@ fn prune_trie, H: Hasher, Number: BlockNumber, F: FnMut(H: } } -/// Select blocks range (inclusive from both ends) for pruning changes tries in. -fn pruning_range( - config: &Configuration, - min_blocks_to_keep: Number, - block: Number, -) -> Option<(Number, Number)> { - // compute number of changes tries we actually want to keep - let (prune_interval, blocks_to_keep) = if config.is_digest_build_enabled() { - // we only CAN prune at block where max-level-digest is created - let max_digest_interval = match config.digest_level_at_block(Zero::zero(), block.clone()) { - Some((digest_level, digest_interval, _)) if digest_level == config.digest_levels => - digest_interval, - _ => return None, - }; - - // compute maximal number of high-level digests to keep - let max_digest_intervals_to_keep = max_digest_intervals_to_keep(min_blocks_to_keep, max_digest_interval); - - // number of blocks BEFORE current block where changes tries are not pruned - ( - max_digest_interval, - max_digest_intervals_to_keep.checked_mul(&max_digest_interval.into()) - ) - } else { - ( - 1, - Some(min_blocks_to_keep) - ) - }; - - // last block for which changes trie is pruned - let last_block_to_prune = blocks_to_keep.and_then(|b| block.checked_sub(&b)); - let first_block_to_prune = last_block_to_prune - .clone() - .and_then(|b| b.checked_sub(&prune_interval.into())); - - last_block_to_prune - .and_then(|last| first_block_to_prune.map(|first| (first + One::one(), last))) -} - -/// Select pruning delay for the changes tries. To make sure we could build a changes -/// trie at block B, we need an access to previous: -/// max_digest_interval = config.digest_interval ^ config.digest_levels -/// blocks. So we can only prune blocks that are earlier than B - max_digest_interval. 
-/// The pruning_delay stands for number of max_digest_interval-s that we want to keep: -/// 0 or 1: means that only last changes trie is guaranteed to exists; -/// 2: the last changes trie + previous changes trie -/// ... -fn max_digest_intervals_to_keep( - min_blocks_to_keep: Number, - max_digest_interval: u32, -) -> Number { - // config.digest_level_at_block ensures that it is not zero - debug_assert!(max_digest_interval != 0); - - let max_digest_intervals_to_keep = min_blocks_to_keep / max_digest_interval.into(); - if max_digest_intervals_to_keep.is_zero() { - One::one() - } else { - max_digest_intervals_to_keep - } -} - #[cfg(test)] mod tests { use std::collections::HashSet; use sp_trie::MemoryDB; - use sp_core::Blake2Hasher; + use sp_core::{H256, Blake2Hasher}; use crate::backend::insert_into_memory_db; use crate::changes_trie::storage::InMemoryStorage; use codec::Encode; use super::*; - fn config(interval: u32, levels: u32) -> Configuration { - Configuration { - digest_interval: interval, - digest_levels: levels, - } - } - - fn prune_by_collect, H: Hasher>( - config: &Configuration, - storage: &S, - min_blocks_to_keep: u64, + fn prune_by_collect( + storage: &dyn Storage, + first: u64, + last: u64, current_block: u64, - ) -> HashSet where H::Out: Codec { + ) -> HashSet { let mut pruned_trie_nodes = HashSet::new(); - prune( - config, - storage, - min_blocks_to_keep, - &AnchorBlockId { hash: Default::default(), number: current_block }, - |node| { pruned_trie_nodes.insert(node); }, - ); + let anchor = AnchorBlockId { hash: Default::default(), number: current_block }; + prune(storage, first, last, &anchor, + |node| { pruned_trie_nodes.insert(node); }); pruned_trie_nodes } @@ -243,7 +143,9 @@ mod tests { &mut mdb1, vec![(vec![10], vec![20])]).unwrap(); let mut mdb2 = MemoryDB::::default(); let root2 = insert_into_memory_db::( - &mut mdb2, vec![(vec![11], vec![21]), (vec![12], vec![22])]).unwrap(); + &mut mdb2, + vec![(vec![11], vec![21]), (vec![12], vec![22])], + 
).unwrap(); let mut mdb3 = MemoryDB::::default(); let ch_root3 = insert_into_memory_db::( &mut mdb3, vec![(vec![110], vec![120])]).unwrap(); @@ -254,7 +156,9 @@ mod tests { ]).unwrap(); let mut mdb4 = MemoryDB::::default(); let root4 = insert_into_memory_db::( - &mut mdb4, vec![(vec![15], vec![25])]).unwrap(); + &mut mdb4, + vec![(vec![15], vec![25])], + ).unwrap(); let storage = InMemoryStorage::new(); storage.insert(65, root1, mdb1); storage.insert(66, root2, mdb2); @@ -264,109 +168,20 @@ mod tests { storage } - // l1-digest is created every 2 blocks - // l2-digest is created every 4 blocks - // we do not want to keep any additional changes tries - // => only one l2-digest is saved AND it is pruned once next is created - let config = Configuration { digest_interval: 2, digest_levels: 2 }; let storage = prepare_storage(); - assert!(prune_by_collect(&config, &storage, 0, 69).is_empty()); - assert!(prune_by_collect(&config, &storage, 0, 70).is_empty()); - assert!(prune_by_collect(&config, &storage, 0, 71).is_empty()); - let non_empty = prune_by_collect(&config, &storage, 0, 72); - assert!(!non_empty.is_empty()); - storage.remove_from_storage(&non_empty); - assert!(storage.into_mdb().drain().is_empty()); + assert!(prune_by_collect(&storage, 20, 30, 90).is_empty()); + assert!(!storage.into_mdb().drain().is_empty()); - // l1-digest is created every 2 blocks - // l2-digest is created every 4 blocks - // we want keep 1 additional changes tries - let config = Configuration { digest_interval: 2, digest_levels: 2 }; let storage = prepare_storage(); - assert!(prune_by_collect(&config, &storage, 8, 69).is_empty()); - assert!(prune_by_collect(&config, &storage, 8, 70).is_empty()); - assert!(prune_by_collect(&config, &storage, 8, 71).is_empty()); - assert!(prune_by_collect(&config, &storage, 8, 72).is_empty()); - assert!(prune_by_collect(&config, &storage, 8, 73).is_empty()); - assert!(prune_by_collect(&config, &storage, 8, 74).is_empty()); - assert!(prune_by_collect(&config, 
&storage, 8, 75).is_empty()); - let non_empty = prune_by_collect(&config, &storage, 8, 76); - assert!(!non_empty.is_empty()); - storage.remove_from_storage(&non_empty); - assert!(storage.into_mdb().drain().is_empty()); + let prune60_65 = prune_by_collect(&storage, 60, 65, 90); + assert!(!prune60_65.is_empty()); + storage.remove_from_storage(&prune60_65); + assert!(!storage.into_mdb().drain().is_empty()); - // l1-digest is created every 2 blocks - // we want keep 2 additional changes tries - let config = Configuration { digest_interval: 2, digest_levels: 1 }; let storage = prepare_storage(); - assert!(prune_by_collect(&config, &storage, 4, 69).is_empty()); - let non_empty = prune_by_collect(&config, &storage, 4, 70); - assert!(!non_empty.is_empty()); - storage.remove_from_storage(&non_empty); - assert!(prune_by_collect(&config, &storage, 4, 71).is_empty()); - let non_empty = prune_by_collect(&config, &storage, 4, 72); - assert!(!non_empty.is_empty()); - storage.remove_from_storage(&non_empty); + let prune60_70 = prune_by_collect(&storage, 60, 70, 90); + assert!(!prune60_70.is_empty()); + storage.remove_from_storage(&prune60_70); assert!(storage.into_mdb().drain().is_empty()); } - - #[test] - fn pruning_range_works() { - // DIGESTS ARE NOT CREATED + NO TRIES ARE PRUNED - assert_eq!(pruning_range(&config(10, 0), 2u64, 2u64), None); - - // DIGESTS ARE NOT CREATED + SOME TRIES ARE PRUNED - assert_eq!(pruning_range(&config(10, 0), 100u64, 110u64), Some((10, 10))); - assert_eq!(pruning_range(&config(10, 0), 100u64, 210u64), Some((110, 110))); - - // DIGESTS ARE CREATED + NO TRIES ARE PRUNED - - assert_eq!(pruning_range(&config(10, 2), 2u64, 0u64), None); - assert_eq!(pruning_range(&config(10, 2), 30u64, 100u64), None); - assert_eq!(pruning_range(&config(::std::u32::MAX, 2), 1u64, 1024u64), None); - assert_eq!(pruning_range(&config(::std::u32::MAX, 2), ::std::u64::MAX, 1024u64), None); - assert_eq!(pruning_range(&config(32, 2), 2048u64, 512u64), None); - 
assert_eq!(pruning_range(&config(32, 2), 2048u64, 1024u64), None); - - // DIGESTS ARE CREATED + SOME TRIES ARE PRUNED - - // when we do not want to keep any highest-level-digests - // (system forces to keep at least one) - assert_eq!(pruning_range(&config(4, 2), 0u64, 32u64), Some((1, 16))); - assert_eq!(pruning_range(&config(4, 2), 0u64, 64u64), Some((33, 48))); - // when we want to keep 1 (last) highest-level-digest - assert_eq!(pruning_range(&config(4, 2), 16u64, 32u64), Some((1, 16))); - assert_eq!(pruning_range(&config(4, 2), 16u64, 64u64), Some((33, 48))); - // when we want to keep 1 (last) + 1 additional level digests - assert_eq!(pruning_range(&config(32, 2), 4096u64, 5120u64), Some((1, 1024))); - assert_eq!(pruning_range(&config(32, 2), 4096u64, 6144u64), Some((1025, 2048))); - } - - #[test] - fn max_digest_intervals_to_keep_works() { - assert_eq!(max_digest_intervals_to_keep(1024u64, 1025), 1u64); - assert_eq!(max_digest_intervals_to_keep(1024u64, 1023), 1u64); - assert_eq!(max_digest_intervals_to_keep(1024u64, 512), 2u64); - assert_eq!(max_digest_intervals_to_keep(1024u64, 511), 2u64); - assert_eq!(max_digest_intervals_to_keep(1024u64, 100), 10u64); - } - - #[test] - fn oldest_non_pruned_trie_works() { - // when digests are not created at all - assert_eq!(oldest_non_pruned_trie(&config(0, 0), 100u64, 10u64), 1); - assert_eq!(oldest_non_pruned_trie(&config(0, 0), 100u64, 110u64), 11); - - // when only l1 digests are created - assert_eq!(oldest_non_pruned_trie(&config(100, 1), 100u64, 50u64), 1); - assert_eq!(oldest_non_pruned_trie(&config(100, 1), 100u64, 110u64), 1); - assert_eq!(oldest_non_pruned_trie(&config(100, 1), 100u64, 210u64), 101); - - // when l2 digests are created - assert_eq!(oldest_non_pruned_trie(&config(100, 2), 100u64, 50u64), 1); - assert_eq!(oldest_non_pruned_trie(&config(100, 2), 100u64, 110u64), 1); - assert_eq!(oldest_non_pruned_trie(&config(100, 2), 100u64, 210u64), 1); - assert_eq!(oldest_non_pruned_trie(&config(100, 2), 100u64, 
10110u64), 1); - assert_eq!(oldest_non_pruned_trie(&config(100, 2), 100u64, 20110u64), 10001); - } } diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index 163ea7f412251..7fb418672872b 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -21,8 +21,11 @@ use hash_db::{Hasher, Prefix, EMPTY_PREFIX}; use sp_trie::DBValue; use sp_trie::MemoryDB; use parking_lot::RwLock; -use crate::changes_trie::{BuildCache, RootsStorage, Storage, AnchorBlockId, BlockNumber}; -use crate::trie_backend_essence::TrieBackendStorage; +use crate::{ + StorageKey, + trie_backend_essence::TrieBackendStorage, + changes_trie::{BuildCache, RootsStorage, Storage, AnchorBlockId, BlockNumber}, +}; #[cfg(test)] use crate::backend::insert_into_memory_db; @@ -93,7 +96,7 @@ impl InMemoryStorage { #[cfg(test)] pub fn with_inputs( mut top_inputs: Vec<(Number, Vec>)>, - children_inputs: Vec<(Vec, Vec<(Number, Vec>)>)>, + children_inputs: Vec<(StorageKey, Vec<(Number, Vec>)>)>, ) -> Self { let mut mdb = MemoryDB::default(); let mut roots = BTreeMap::new(); @@ -179,7 +182,7 @@ impl Storage for InMemoryStorage>, HashSet>>), + functor: &mut dyn FnMut(&HashMap, HashSet>), ) -> bool { self.cache.with_changed_keys(root, functor) } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 6fc4312d1079d..f293ae9f51615 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -17,8 +17,9 @@ //! Concrete externalities implementation. 
use crate::{ - backend::Backend, OverlayedChanges, StorageTransactionCache, - changes_trie::Storage as ChangesTrieStorage, + StorageKey, StorageValue, OverlayedChanges, StorageTransactionCache, + backend::Backend, + changes_trie::State as ChangesTrieState, }; use hash_db::Hasher; @@ -65,7 +66,7 @@ impl error::Error for Error { } /// Wraps a read-only backend, call executor, and current overlayed changes. -pub struct Ext<'a, H, N, B, T> +pub struct Ext<'a, H, N, B> where H: Hasher, B: 'a + Backend, @@ -77,8 +78,8 @@ pub struct Ext<'a, H, N, B, T> backend: &'a B, /// The cache for the storage transactions. storage_transaction_cache: &'a mut StorageTransactionCache, - /// Changes trie storage to read from. - changes_trie_storage: Option<&'a T>, + /// Changes trie state to read from. + changes_trie_state: Option>, /// Pseudo-unique id used for tracing. pub id: u16, /// Dummy usage of N arg. @@ -87,12 +88,11 @@ pub struct Ext<'a, H, N, B, T> extensions: Option<&'a mut Extensions>, } -impl<'a, H, N, B, T> Ext<'a, H, N, B, T> +impl<'a, H, N, B> Ext<'a, H, N, B> where H: Hasher, H::Out: Ord + 'static + codec::Codec, B: 'a + Backend, - T: 'a + ChangesTrieStorage, N: crate::changes_trie::BlockNumber, { @@ -101,13 +101,13 @@ where overlay: &'a mut OverlayedChanges, storage_transaction_cache: &'a mut StorageTransactionCache, backend: &'a B, - changes_trie_storage: Option<&'a T>, + changes_trie_state: Option>, extensions: Option<&'a mut Extensions>, ) -> Self { Ext { overlay, backend, - changes_trie_storage, + changes_trie_state, storage_transaction_cache, id: rand::random(), _phantom: Default::default(), @@ -124,15 +124,14 @@ where } #[cfg(test)] -impl<'a, H, N, B, T> Ext<'a, H, N, B, T> +impl<'a, H, N, B> Ext<'a, H, N, B> where H: Hasher, H::Out: Ord + 'static, B: 'a + Backend, - T: 'a + ChangesTrieStorage, N: crate::changes_trie::BlockNumber, { - pub fn storage_pairs(&self) -> Vec<(Vec, Vec)> { + pub fn storage_pairs(&self) -> Vec<(StorageKey, StorageValue)> { use 
std::collections::HashMap; self.backend.pairs().iter() @@ -146,15 +145,14 @@ where } } -impl<'a, H, B, T, N> Externalities for Ext<'a, H, N, B, T> +impl<'a, H, B, N> Externalities for Ext<'a, H, N, B> where H: Hasher, H::Out: Ord + 'static + codec::Codec, B: 'a + Backend, - T: 'a + ChangesTrieStorage, N: crate::changes_trie::BlockNumber, { - fn storage(&self, key: &[u8]) -> Option> { + fn storage(&self, key: &[u8]) -> Option { let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay.storage(key).map(|x| x.map(|x| x.to_vec())).unwrap_or_else(|| self.backend.storage(key).expect(EXT_NOT_ALLOWED_TO_FAIL)); @@ -181,7 +179,7 @@ where result.map(|r| r.encode()) } - fn original_storage(&self, key: &[u8]) -> Option> { + fn original_storage(&self, key: &[u8]) -> Option { let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.backend.storage(key).expect(EXT_NOT_ALLOWED_TO_FAIL); @@ -210,7 +208,7 @@ where storage_key: ChildStorageKey, child_info: ChildInfo, key: &[u8], - ) -> Option> { + ) -> Option { let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay .child_storage(storage_key.as_ref(), key) @@ -259,7 +257,7 @@ where storage_key: ChildStorageKey, child_info: ChildInfo, key: &[u8], - ) -> Option> { + ) -> Option { let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.backend .child_storage(storage_key.as_ref(), child_info, key) @@ -335,7 +333,7 @@ where result } - fn next_storage_key(&self, key: &[u8]) -> Option> { + fn next_storage_key(&self, key: &[u8]) -> Option { let next_backend_key = self.backend.next_storage_key(key).expect(EXT_NOT_ALLOWED_TO_FAIL); let next_overlay_key_change = self.overlay.next_storage_key_change(key); @@ -355,7 +353,7 @@ where storage_key: ChildStorageKey, child_info: ChildInfo, key: &[u8], - ) -> Option> { + ) -> Option { let next_backend_key = self.backend .next_child_storage_key(storage_key.as_ref(), child_info, key) 
.expect(EXT_NOT_ALLOWED_TO_FAIL); @@ -379,7 +377,7 @@ where } } - fn place_storage(&mut self, key: Vec, value: Option>) { + fn place_storage(&mut self, key: StorageKey, value: Option) { trace!(target: "state-trace", "{:04x}: Put {}={:?}", self.id, HexDisplay::from(&key), @@ -399,8 +397,8 @@ where &mut self, storage_key: ChildStorageKey, child_info: ChildInfo, - key: Vec, - value: Option>, + key: StorageKey, + value: Option, ) { trace!(target: "state-trace", "{:04x}: PutChild({}) {}={:?}", self.id, @@ -564,7 +562,7 @@ where let _guard = sp_panic_handler::AbortGuard::force_abort(); let root = self.overlay.changes_trie_root( self.backend, - self.changes_trie_storage.clone(), + self.changes_trie_state.as_ref(), Decode::decode(&mut &parent_hash[..]).map_err(|e| trace!( target: "state-trace", @@ -586,11 +584,10 @@ where } } -impl<'a, H, B, T, N> sp_externalities::ExtensionStore for Ext<'a, H, N, B, T> +impl<'a, H, B, N> sp_externalities::ExtensionStore for Ext<'a, H, N, B> where H: Hasher, B: 'a + Backend, - T: 'a + ChangesTrieStorage, N: crate::changes_trie::BlockNumber, { fn extension_by_type_id(&mut self, type_id: TypeId) -> Option<&mut dyn Any> { @@ -602,19 +599,19 @@ where mod tests { use super::*; use hex_literal::hex; + use num_traits::Zero; use codec::Encode; use sp_core::{H256, Blake2Hasher, storage::well_known_keys::EXTRINSIC_INDEX, map}; use crate::{ changes_trie::{ Configuration as ChangesTrieConfiguration, - InMemoryStorage as InMemoryChangesTrieStorage, + InMemoryStorage as TestChangesTrieStorage, }, InMemoryBackend, overlayed_changes::OverlayedValue, }; use sp_core::storage::{Storage, StorageChild}; type TestBackend = InMemoryBackend; - type TestChangesTrieStorage = InMemoryChangesTrieStorage; - type TestExt<'a> = Ext<'a, Blake2Hasher, u64, TestBackend, TestChangesTrieStorage>; + type TestExt<'a> = Ext<'a, Blake2Hasher, u64, TestBackend>; fn prepare_overlay_with_changes() -> OverlayedChanges { OverlayedChanges { @@ -629,10 +626,14 @@ mod tests { }), 
].into_iter().collect(), committed: Default::default(), - changes_trie_config: Some(ChangesTrieConfiguration { - digest_interval: 0, - digest_levels: 0, - }), + collect_extrinsics: true, + } + } + + fn changes_trie_config() -> ChangesTrieConfiguration { + ChangesTrieConfiguration { + digest_interval: 0, + digest_levels: 0, } } @@ -646,13 +647,11 @@ mod tests { } #[test] - fn storage_changes_root_is_none_when_extrinsic_changes_are_none() { + fn storage_changes_root_is_none_when_state_is_not_provided() { let mut overlay = prepare_overlay_with_changes(); let mut cache = StorageTransactionCache::default(); - overlay.changes_trie_config = None; - let storage = TestChangesTrieStorage::with_blocks(vec![(100, Default::default())]); let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, Some(&storage), None); + let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); assert_eq!(ext.storage_changes_root(&H256::default().encode()).unwrap(), None); } @@ -661,8 +660,9 @@ mod tests { let mut overlay = prepare_overlay_with_changes(); let mut cache = StorageTransactionCache::default(); let storage = TestChangesTrieStorage::with_blocks(vec![(99, Default::default())]); + let state = Some(ChangesTrieState::new(changes_trie_config(), Zero::zero(), &storage)); let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, Some(&storage), None); + let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, state, None); assert_eq!( ext.storage_changes_root(&H256::default().encode()).unwrap(), Some(hex!("bb0c2ef6e1d36d5490f9766cfcc7dfe2a6ca804504c3bb206053890d6dd02376").to_vec()), @@ -675,8 +675,9 @@ mod tests { let mut cache = StorageTransactionCache::default(); overlay.prospective.top.get_mut(&vec![1]).unwrap().value = None; let storage = TestChangesTrieStorage::with_blocks(vec![(99, Default::default())]); + let state = Some(ChangesTrieState::new(changes_trie_config(), 
Zero::zero(), &storage)); let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, Some(&storage), None); + let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, state, None); assert_eq!( ext.storage_changes_root(&H256::default().encode()).unwrap(), Some(hex!("96f5aae4690e7302737b6f9b7f8567d5bbb9eac1c315f80101235a92d9ec27f4").to_vec()), diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index ae1a214a72c92..0a29468bbc4ef 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -16,7 +16,11 @@ //! State machine in memory backend. -use crate::{trie_backend::TrieBackend, backend::{Backend, insert_into_memory_db}}; +use crate::{ + StorageKey, StorageValue, StorageCollection, + trie_backend::TrieBackend, + backend::{Backend, insert_into_memory_db}, +}; use std::{error, fmt, collections::{BTreeMap, HashMap}, marker::PhantomData, ops}; use hash_db::Hasher; use sp_trie::{ @@ -43,7 +47,7 @@ impl error::Error for Void { /// In-memory backend. Fully recomputes tries each time `as_trie_backend` is called but useful for /// tests and proof checking. pub struct InMemory { - inner: HashMap, OwnedChildInfo)>, BTreeMap, Vec>>, + inner: HashMap, BTreeMap>, // This field is only needed for returning reference in `as_trie_backend`. 
trie: Option, H>>, _hasher: PhantomData, @@ -84,7 +88,7 @@ impl PartialEq for InMemory { impl InMemory { /// Copy the state, with applied updates pub fn update< - T: IntoIterator, OwnedChildInfo)>, Vec<(Vec, Option>)>)> + T: IntoIterator, StorageCollection)> >( &self, changes: T, @@ -103,10 +107,10 @@ impl InMemory { } } -impl From, OwnedChildInfo)>, BTreeMap, Vec>>> +impl From, BTreeMap>> for InMemory { - fn from(inner: HashMap, OwnedChildInfo)>, BTreeMap, Vec>>) -> Self { + fn from(inner: HashMap, BTreeMap>) -> Self { InMemory { inner, trie: None, @@ -117,7 +121,7 @@ impl From, OwnedChildInfo)>, BTreeMap impl From for InMemory { fn from(inners: Storage) -> Self { - let mut inner: HashMap, OwnedChildInfo)>, BTreeMap, Vec>> + let mut inner: HashMap, BTreeMap> = inners.children.into_iter().map(|(k, c)| (Some((k, c.child_info)), c.data)).collect(); inner.insert(None, inners.top); InMemory { @@ -128,8 +132,8 @@ impl From for InMemory { } } -impl From, Vec>> for InMemory { - fn from(inner: BTreeMap, Vec>) -> Self { +impl From> for InMemory { + fn from(inner: BTreeMap) -> Self { let mut expanded = HashMap::new(); expanded.insert(None, inner); InMemory { @@ -140,12 +144,12 @@ impl From, Vec>> for InMemory { } } -impl From, OwnedChildInfo)>, Vec<(Vec, Option>)>)>> +impl From, StorageCollection)>> for InMemory { fn from( - inner: Vec<(Option<(Vec, OwnedChildInfo)>, Vec<(Vec, Option>)>)>, + inner: Vec<(Option<(StorageKey, OwnedChildInfo)>, StorageCollection)>, ) -> Self { - let mut expanded: HashMap, OwnedChildInfo)>, BTreeMap, Vec>> + let mut expanded: HashMap, BTreeMap> = HashMap::new(); for (child_info, key_values) in inner { let entry = expanded.entry(child_info).or_default(); @@ -171,12 +175,12 @@ impl InMemory { impl Backend for InMemory where H::Out: Codec { type Error = Void; type Transaction = Vec<( - Option<(Vec, OwnedChildInfo)>, - Vec<(Vec, Option>)>, + Option<(StorageKey, OwnedChildInfo)>, + StorageCollection, )>; type TrieBackendStorage = MemoryDB; - fn 
storage(&self, key: &[u8]) -> Result>, Self::Error> { + fn storage(&self, key: &[u8]) -> Result, Self::Error> { Ok(self.inner.get(&None).and_then(|map| map.get(key).map(Clone::clone))) } @@ -185,7 +189,7 @@ impl Backend for InMemory where H::Out: Codec { storage_key: &[u8], child_info: ChildInfo, key: &[u8], - ) -> Result>, Self::Error> { + ) -> Result, Self::Error> { Ok(self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) .and_then(|map| map.get(key).map(Clone::clone))) } @@ -279,7 +283,7 @@ impl Backend for InMemory where H::Out: Codec { (root, is_default, vec![(child_info, full_transaction)]) } - fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { + fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded); let next_key = self.inner.get(&None) .and_then(|map| map.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()); @@ -292,7 +296,7 @@ impl Backend for InMemory where H::Out: Codec { storage_key: &[u8], child_info: ChildInfo, key: &[u8], - ) -> Result>, Self::Error> { + ) -> Result, Self::Error> { let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded); let next_key = self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) .and_then(|map| map.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()); @@ -300,14 +304,14 @@ impl Backend for InMemory where H::Out: Codec { Ok(next_key) } - fn pairs(&self) -> Vec<(Vec, Vec)> { + fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { self.inner.get(&None) .into_iter() .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), v.clone()))) .collect() } - fn keys(&self, prefix: &[u8]) -> Vec> { + fn keys(&self, prefix: &[u8]) -> Vec { self.inner.get(&None) .into_iter() .flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned()) @@ -319,7 +323,7 @@ impl Backend for InMemory where H::Out: Codec { storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], - ) -> Vec> { + ) -> Vec { 
self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) .into_iter() .flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned()) diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index c8329178c6c4d..bb62df6da4905 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -23,8 +23,8 @@ use log::{warn, trace}; use hash_db::Hasher; use codec::{Decode, Encode, Codec}; use sp_core::{ - storage::{well_known_keys, ChildInfo}, NativeOrEncoded, NeverNativeValue, - traits::CodeExecutor, hexdisplay::HexDisplay + storage::ChildInfo, NativeOrEncoded, NeverNativeValue, + traits::{CodeExecutor, CallInWasmExt}, hexdisplay::HexDisplay, }; use overlayed_changes::OverlayedChangeSet; use sp_externalities::Extensions; @@ -49,18 +49,23 @@ pub use ext::Ext; pub use backend::Backend; pub use changes_trie::{ AnchorBlockId as ChangesTrieAnchorBlockId, + State as ChangesTrieState, Storage as ChangesTrieStorage, RootsStorage as ChangesTrieRootsStorage, InMemoryStorage as InMemoryChangesTrieStorage, BuildCache as ChangesTrieBuildCache, CacheAction as ChangesTrieCacheAction, ConfigurationRange as ChangesTrieConfigurationRange, - key_changes, key_changes_proof, key_changes_proof_check, + key_changes, key_changes_proof, + key_changes_proof_check, key_changes_proof_check_with_db, prune as prune_changes_tries, - oldest_non_pruned_trie as oldest_non_pruned_changes_trie, + disabled_state as disabled_changes_trie_state, BlockNumber as ChangesTrieBlockNumber, }; -pub use overlayed_changes::{OverlayedChanges, StorageChanges, StorageTransactionCache}; +pub use overlayed_changes::{ + OverlayedChanges, StorageChanges, StorageTransactionCache, StorageKey, StorageValue, + StorageCollection, ChildStorageCollection, +}; pub use proving_backend::{ create_proof_check_backend, create_proof_check_backend_storage, merge_storage_proofs, ProofRecorder, ProvingBackend, ProvingBackendRecorder, StorageProof, @@ -171,7 
+176,7 @@ fn always_untrusted_wasm() -> ExecutionManager +pub struct StateMachine<'a, B, H, N, Exec> where H: Hasher, B: Backend, @@ -183,29 +188,30 @@ pub struct StateMachine<'a, B, H, N, T, Exec> call_data: &'a [u8], overlay: &'a mut OverlayedChanges, extensions: Extensions, - changes_trie_storage: Option<&'a T>, + changes_trie_state: Option>, _marker: PhantomData<(H, N)>, storage_transaction_cache: Option<&'a mut StorageTransactionCache>, } -impl<'a, B, H, N, T, Exec> StateMachine<'a, B, H, N, T, Exec> where +impl<'a, B, H, N, Exec> StateMachine<'a, B, H, N, Exec> where H: Hasher, H::Out: Ord + 'static + codec::Codec, - Exec: CodeExecutor, + Exec: CodeExecutor + Clone + 'static, B: Backend, - T: ChangesTrieStorage, N: crate::changes_trie::BlockNumber, { /// Creates new substrate state machine. pub fn new( backend: &'a B, - changes_trie_storage: Option<&'a T>, + changes_trie_state: Option>, overlay: &'a mut OverlayedChanges, exec: &'a Exec, method: &'a str, call_data: &'a [u8], - extensions: Extensions, + mut extensions: Extensions, ) -> Self { + extensions.register(CallInWasmExt::new(exec.clone())); + Self { backend, exec, @@ -213,7 +219,7 @@ impl<'a, B, H, N, T, Exec> StateMachine<'a, B, H, N, T, Exec> where call_data, extensions, overlay, - changes_trie_storage, + changes_trie_state, _marker: PhantomData, storage_transaction_cache: None, } @@ -271,7 +277,7 @@ impl<'a, B, H, N, T, Exec> StateMachine<'a, B, H, N, T, Exec> where self.overlay, cache, self.backend, - self.changes_trie_storage.clone(), + self.changes_trie_state.clone(), Some(&mut self.extensions), ); @@ -386,20 +392,8 @@ impl<'a, B, H, N, T, Exec> StateMachine<'a, B, H, N, T, Exec> where CallResult, ) -> CallResult { - // read changes trie configuration. The reason why we're doing it here instead of the - // `OverlayedChanges` constructor is that we need proofs for this read as a part of - // proof-of-execution on light clients. 
And the proof is recorded by the backend which - // is created after OverlayedChanges - - let init_overlay = |overlay: &mut OverlayedChanges, final_check: bool, backend: &B| { - let changes_trie_config = try_read_overlay_value( - overlay, - backend, - well_known_keys::CHANGES_TRIE_CONFIG - )?; - set_changes_trie_config(overlay, changes_trie_config, final_check) - }; - init_overlay(self.overlay, false, &self.backend)?; + let changes_tries_enabled = self.changes_trie_state.is_some(); + self.overlay.set_collect_extrinsics(changes_tries_enabled); let result = { let orig_prospective = self.overlay.prospective.clone(); @@ -431,16 +425,12 @@ impl<'a, B, H, N, T, Exec> StateMachine<'a, B, H, N, T, Exec> where } }; - if result.is_ok() { - init_overlay(self.overlay, true, self.backend)?; - } - result.map_err(|e| Box::new(e) as _) } } /// Prove execution using the given state backend, overlayed changes, and call executor. -pub fn prove_execution( +pub fn prove_execution( mut backend: B, overlay: &mut OverlayedChanges, exec: &Exec, @@ -451,11 +441,12 @@ where B: Backend, H: Hasher, H::Out: Ord + 'static + codec::Codec, - Exec: CodeExecutor, + Exec: CodeExecutor + Clone + 'static, + N: crate::changes_trie::BlockNumber, { let trie_backend = backend.as_trie_backend() .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; - prove_execution_on_trie_backend(trie_backend, overlay, exec, method, call_data) + prove_execution_on_trie_backend::<_, _, N, _>(trie_backend, overlay, exec, method, call_data) } /// Prove execution using the given trie backend, overlayed changes, and call executor. @@ -467,7 +458,7 @@ where /// /// Note: changes to code will be in place if this call is made again. For running partial /// blocks (e.g. a transaction at a time), ensure a different method is used. 
-pub fn prove_execution_on_trie_backend( +pub fn prove_execution_on_trie_backend( trie_backend: &TrieBackend, overlay: &mut OverlayedChanges, exec: &Exec, @@ -478,10 +469,11 @@ where S: trie_backend_essence::TrieBackendStorage, H: Hasher, H::Out: Ord + 'static + codec::Codec, - Exec: CodeExecutor, + Exec: CodeExecutor + 'static + Clone, + N: crate::changes_trie::BlockNumber, { let proving_backend = proving_backend::ProvingBackend::new(trie_backend); - let mut sm = StateMachine::<_, H, _, InMemoryChangesTrieStorage, Exec>::new( + let mut sm = StateMachine::<_, H, N, Exec>::new( &proving_backend, None, overlay, exec, method, call_data, Extensions::default(), ); @@ -494,7 +486,7 @@ where } /// Check execution proof, generated by `prove_execution` call. -pub fn execution_proof_check( +pub fn execution_proof_check( root: H::Out, proof: StorageProof, overlay: &mut OverlayedChanges, @@ -504,15 +496,16 @@ pub fn execution_proof_check( ) -> Result, Box> where H: Hasher, - Exec: CodeExecutor, + Exec: CodeExecutor + Clone + 'static, H::Out: Ord + 'static + codec::Codec, + N: crate::changes_trie::BlockNumber, { let trie_backend = create_proof_check_backend::(root.into(), proof)?; - execution_proof_check_on_trie_backend(&trie_backend, overlay, exec, method, call_data) + execution_proof_check_on_trie_backend::<_, N, _>(&trie_backend, overlay, exec, method, call_data) } /// Check execution proof on proving backend, generated by `prove_execution` call. 
-pub fn execution_proof_check_on_trie_backend( +pub fn execution_proof_check_on_trie_backend( trie_backend: &TrieBackend, H>, overlay: &mut OverlayedChanges, exec: &Exec, @@ -522,9 +515,10 @@ pub fn execution_proof_check_on_trie_backend( where H: Hasher, H::Out: Ord + 'static + codec::Codec, - Exec: CodeExecutor, + Exec: CodeExecutor + Clone + 'static, + N: crate::changes_trie::BlockNumber, { - let mut sm = StateMachine::<_, H, _, InMemoryChangesTrieStorage, Exec>::new( + let mut sm = StateMachine::<_, H, N, Exec>::new( trie_backend, None, overlay, exec, method, call_data, Extensions::default(), ); @@ -690,44 +684,6 @@ where .map_err(|e| Box::new(e) as Box) } -/// Sets overlayed changes' changes trie configuration. Returns error if configuration -/// differs from previous OR config decode has failed. -fn set_changes_trie_config( - overlay: &mut OverlayedChanges, - config: Option>, - final_check: bool, -) -> Result<(), Box> { - let config = match config { - Some(v) => Some(Decode::decode(&mut &v[..]) - .map_err(|_| Box::new("Failed to decode changes trie configuration".to_owned()) as Box)?), - None => None, - }; - - if final_check && overlay.changes_trie_config.is_some() != config.is_some() { - return Err(Box::new("Changes trie configuration change is not supported".to_owned())); - } - - if let Some(config) = config { - if !overlay.set_changes_trie_config(config) { - return Err(Box::new("Changes trie configuration change is not supported".to_owned())); - } - } - Ok(()) -} - -/// Reads storage value from overlay or from the backend. 
-fn try_read_overlay_value( - overlay: &OverlayedChanges, - backend: &B, key: &[u8], -) -> Result>, Box> where H: Hasher, B: Backend { - match overlay.storage(key).map(|x| x.map(|x| x.to_vec())) { - Some(value) => Ok(value), - None => backend - .storage(key) - .map_err(|err| Box::new(ExecutionError::Backend(format!("{}", err))) as Box), - } -} - #[cfg(test)] mod tests { use std::collections::BTreeMap; @@ -735,12 +691,10 @@ mod tests { use overlayed_changes::OverlayedValue; use super::*; use super::ext::Ext; - use super::changes_trie::{ - InMemoryStorage as InMemoryChangesTrieStorage, - Configuration as ChangesTrieConfig, - }; + use super::changes_trie::Configuration as ChangesTrieConfig; use sp_core::{Blake2Hasher, map, traits::Externalities, storage::ChildStorageKey}; + #[derive(Clone)] struct DummyCodeExecutor { change_changes_trie_config: bool, native_available: bool, @@ -767,7 +721,7 @@ mod tests { ) -> (CallResult, bool) { if self.change_changes_trie_config { ext.place_storage( - well_known_keys::CHANGES_TRIE_CONFIG.to_vec(), + sp_core::storage::well_known_keys::CHANGES_TRIE_CONFIG.to_vec(), Some( ChangesTrieConfig { digest_interval: 777, @@ -797,15 +751,26 @@ mod tests { } } + impl sp_core::traits::CallInWasm for DummyCodeExecutor { + fn call_in_wasm( + &self, + _: &[u8], + _: &str, + _: &[u8], + _: &mut dyn Externalities, + ) -> std::result::Result, String> { + unimplemented!("Not required in tests.") + } + } + #[test] fn execute_works() { let backend = trie_backend::tests::test_trie(); let mut overlayed_changes = Default::default(); - let changes_trie_storage = InMemoryChangesTrieStorage::::new(); let mut state_machine = StateMachine::new( &backend, - Some(&changes_trie_storage), + changes_trie::disabled_state::<_, u64>(), &mut overlayed_changes, &DummyCodeExecutor { change_changes_trie_config: false, @@ -829,11 +794,10 @@ mod tests { fn execute_works_with_native_else_wasm() { let backend = trie_backend::tests::test_trie(); let mut overlayed_changes = 
Default::default(); - let changes_trie_storage = InMemoryChangesTrieStorage::::new(); let mut state_machine = StateMachine::new( &backend, - Some(&changes_trie_storage), + changes_trie::disabled_state::<_, u64>(), &mut overlayed_changes, &DummyCodeExecutor { change_changes_trie_config: false, @@ -854,11 +818,10 @@ mod tests { let mut consensus_failed = false; let backend = trie_backend::tests::test_trie(); let mut overlayed_changes = Default::default(); - let changes_trie_storage = InMemoryChangesTrieStorage::::new(); let mut state_machine = StateMachine::new( &backend, - Some(&changes_trie_storage), + changes_trie::disabled_state::<_, u64>(), &mut overlayed_changes, &DummyCodeExecutor { change_changes_trie_config: false, @@ -895,7 +858,7 @@ mod tests { // fetch execution proof from 'remote' full node let remote_backend = trie_backend::tests::test_trie(); let remote_root = remote_backend.storage_root(std::iter::empty()).0; - let (remote_result, remote_proof) = prove_execution( + let (remote_result, remote_proof) = prove_execution::<_, _, u64, _>( remote_backend, &mut Default::default(), &executor, @@ -904,7 +867,7 @@ mod tests { ).unwrap(); // check proof locally - let local_result = execution_proof_check::( + let local_result = execution_proof_check::( remote_root, remote_proof, &mut Default::default(), @@ -941,13 +904,12 @@ mod tests { }; { - let changes_trie_storage = InMemoryChangesTrieStorage::::new(); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new( &mut overlay, &mut cache, backend, - Some(&changes_trie_storage), + changes_trie::disabled_state::<_, u64>(), None, ); ext.clear_prefix(b"ab"); @@ -972,14 +934,13 @@ mod tests { fn set_child_storage_works() { let mut state = InMemoryBackend::::default(); let backend = state.as_trie_backend().unwrap(); - let changes_trie_storage = InMemoryChangesTrieStorage::::new(); let mut overlay = OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new( 
&mut overlay, &mut cache, backend, - Some(&changes_trie_storage), + changes_trie::disabled_state::<_, u64>(), None, ); @@ -1065,30 +1026,6 @@ mod tests { ); } - #[test] - fn cannot_change_changes_trie_config() { - let backend = trie_backend::tests::test_trie(); - let mut overlayed_changes = Default::default(); - let changes_trie_storage = InMemoryChangesTrieStorage::::new(); - - let mut state_machine = StateMachine::new( - &backend, - Some(&changes_trie_storage), - &mut overlayed_changes, - &DummyCodeExecutor { - change_changes_trie_config: true, - native_available: false, - native_succeeds: true, - fallback_succeeds: true, - }, - "test", - &[], - Default::default(), - ); - - assert!(state_machine.execute(ExecutionStrategy::NativeWhenPossible).is_err()); - } - #[test] fn child_storage_uuid() { const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); @@ -1100,13 +1037,12 @@ mod tests { let subtrie2 = ChildStorageKey::from_slice(b":child_storage:default:sub_test2").unwrap(); let mut transaction = { let backend = test_trie(); - let changes_trie_storage = InMemoryChangesTrieStorage::::new(); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new( &mut overlay, &mut cache, &backend, - Some(&changes_trie_storage), + changes_trie::disabled_state::<_, u64>(), None, ); ext.set_child_storage(subtrie1, CHILD_INFO_1, b"abc".to_vec(), b"def".to_vec()); @@ -1124,28 +1060,4 @@ mod tests { } assert!(!duplicate); } - - #[test] - fn cannot_change_changes_trie_config_with_native_else_wasm() { - let backend = trie_backend::tests::test_trie(); - let mut overlayed_changes = Default::default(); - let changes_trie_storage = InMemoryChangesTrieStorage::::new(); - - let mut state_machine = StateMachine::new( - &backend, - Some(&changes_trie_storage), - &mut overlayed_changes, - &DummyCodeExecutor { - change_changes_trie_config: true, - native_available: false, - native_succeeds: true, - fallback_succeeds: true, - }, - "test", - &[], - 
Default::default(), - ); - - assert!(state_machine.execute(ExecutionStrategy::NativeElseWasm).is_err()); - } } diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index eafd31241038b..ed6f30a4f596b 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -19,8 +19,8 @@ use crate::{ backend::Backend, ChangesTrieTransaction, changes_trie::{ - NO_EXTRINSIC_INDEX, Configuration as ChangesTrieConfig, BlockNumber, build_changes_trie, - Storage as ChangesTrieStorage, + NO_EXTRINSIC_INDEX, BlockNumber, build_changes_trie, + State as ChangesTrieState, }, }; @@ -33,6 +33,18 @@ use std::{mem, ops}; use hash_db::Hasher; +/// Storage key. +pub type StorageKey = Vec; + +/// Storage value. +pub type StorageValue = Vec; + +/// In memory array of storage values. +pub type StorageCollection = Vec<(StorageKey, Option)>; + +/// In memory arrays of storage values for multiple child tries. +pub type ChildStorageCollection = Vec<(StorageKey, StorageCollection)>; + /// The overlayed changes to state to be queried on top of the backend. /// /// A transaction shares all prospective changes within an inner overlay @@ -43,9 +55,8 @@ pub struct OverlayedChanges { pub(crate) prospective: OverlayedChangeSet, /// Committed changes. pub(crate) committed: OverlayedChangeSet, - /// Changes trie configuration. None by default, but could be installed by the - /// runtime if it supports change tries. - pub(crate) changes_trie_config: Option, + /// True if extrinsiscs stats must be collected. + pub(crate) collect_extrinsics: bool, } /// The storage value, used inside OverlayedChanges. @@ -53,7 +64,7 @@ pub struct OverlayedChanges { #[cfg_attr(test, derive(PartialEq))] pub struct OverlayedValue { /// Current value. None if value has been deleted. - pub value: Option>, + pub value: Option, /// The set of extinsic indices where the values has been changed. 
/// Is filled only if runtime has announced changes trie support. pub extrinsics: Option>, @@ -64,9 +75,9 @@ pub struct OverlayedValue { #[cfg_attr(test, derive(PartialEq))] pub struct OverlayedChangeSet { /// Top level storage changes. - pub top: BTreeMap, OverlayedValue>, + pub top: BTreeMap, /// Child storage changes. - pub children: HashMap, (BTreeMap, OverlayedValue>, OwnedChildInfo)>, + pub children: HashMap, OwnedChildInfo)>, } /// A storage changes structure that can be generated by the data collected in [`OverlayedChanges`]. @@ -77,9 +88,9 @@ pub struct StorageChanges { /// All changes to the main storage. /// /// A value of `None` means that it was deleted. - pub main_storage_changes: Vec<(Vec, Option>)>, + pub main_storage_changes: StorageCollection, /// All changes to the child storages. - pub child_storage_changes: Vec<(Vec, Vec<(Vec, Option>)>)>, + pub child_storage_changes: ChildStorageCollection, /// A transaction for the backend that contains all changes from /// [`main_storage_changes`](Self::main_storage_changes) and from /// [`child_storage_changes`](Self::child_storage_changes). @@ -95,8 +106,8 @@ pub struct StorageChanges { impl StorageChanges { /// Deconstruct into the inner values pub fn into_inner(self) -> ( - Vec<(Vec, Option>)>, - Vec<(Vec, Vec<(Vec, Option>)>)>, + StorageCollection, + ChildStorageCollection, Transaction, H::Out, Option>, @@ -156,8 +167,8 @@ impl Default for StorageChanges } #[cfg(test)] -impl FromIterator<(Vec, OverlayedValue)> for OverlayedChangeSet { - fn from_iter, OverlayedValue)>>(iter: T) -> Self { +impl FromIterator<(StorageKey, OverlayedValue)> for OverlayedChangeSet { + fn from_iter>(iter: T) -> Self { Self { top: iter.into_iter().collect(), children: Default::default(), @@ -184,20 +195,9 @@ impl OverlayedChanges { self.prospective.is_empty() && self.committed.is_empty() } - /// Sets the changes trie configuration. 
- /// - /// Returns false if configuration has been set already and we now trying - /// to install different configuration. This isn't supported now. - pub(crate) fn set_changes_trie_config(&mut self, config: ChangesTrieConfig) -> bool { - if let Some(ref old_config) = self.changes_trie_config { - // we do not support changes trie configuration' change now - if *old_config != config { - return false; - } - } - - self.changes_trie_config = Some(config); - true + /// Ask to collect/not to collect extrinsics indices where key(s) has been changed. + pub fn set_collect_extrinsics(&mut self, collect_extrinsics: bool) { + self.collect_extrinsics = collect_extrinsics; } /// Returns a double-Option: None if the key is unknown (i.e. and the query should be referred @@ -231,7 +231,7 @@ impl OverlayedChanges { /// Inserts the given key-value pair into the prospective change set. /// /// `None` can be used to delete a value specified by the given key. - pub(crate) fn set_storage(&mut self, key: Vec, val: Option>) { + pub(crate) fn set_storage(&mut self, key: StorageKey, val: Option) { let extrinsic_index = self.extrinsic_index(); let entry = self.prospective.top.entry(key).or_default(); entry.value = val; @@ -247,10 +247,10 @@ impl OverlayedChanges { /// `None` can be used to delete a value specified by the given key. pub(crate) fn set_child_storage( &mut self, - storage_key: Vec, + storage_key: StorageKey, child_info: ChildInfo, - key: Vec, - val: Option>, + key: StorageKey, + val: Option, ) { let extrinsic_index = self.extrinsic_index(); let map_entry = self.prospective.children.entry(storage_key) @@ -429,8 +429,8 @@ impl OverlayedChanges { /// Panics: /// Will panic if there are any uncommitted prospective changes. 
pub fn into_committed(self) -> ( - impl Iterator, Option>)>, - impl Iterator, (impl Iterator, Option>)>, OwnedChildInfo))>, + impl Iterator)>, + impl Iterator)>, OwnedChildInfo))>, ){ assert!(self.prospective.is_empty()); ( @@ -442,11 +442,11 @@ impl OverlayedChanges { /// Convert this instance with all changes into a [`StorageChanges`] instance. pub fn into_storage_changes< - B: Backend, H: Hasher, N: BlockNumber, T: ChangesTrieStorage + B: Backend, H: Hasher, N: BlockNumber >( self, backend: &B, - changes_trie_storage: Option<&T>, + changes_trie_state: Option<&ChangesTrieState>, parent_hash: H::Out, mut cache: StorageTransactionCache, ) -> Result, String> where H::Out: Ord + Encode + 'static { @@ -463,7 +463,7 @@ impl OverlayedChanges { if cache.changes_trie_transaction.is_none() { self.changes_trie_root( backend, - changes_trie_storage, + changes_trie_state, parent_hash, false, &mut cache, @@ -501,7 +501,7 @@ impl OverlayedChanges { /// Changes that are made outside of extrinsics, are marked with /// `NO_EXTRINSIC_INDEX` index. fn extrinsic_index(&self) -> Option { - match self.changes_trie_config.is_some() { + match self.collect_extrinsics { true => Some( self.storage(EXTRINSIC_INDEX) .and_then(|idx| idx.and_then(|idx| Decode::decode(&mut &*idx).ok())) @@ -557,17 +557,17 @@ impl OverlayedChanges { /// # Panics /// /// Panics on storage error, when `panic_on_storage_error` is set. 
- pub fn changes_trie_root, T: ChangesTrieStorage>( + pub fn changes_trie_root<'a, H: Hasher, N: BlockNumber, B: Backend>( &self, backend: &B, - changes_trie_storage: Option<&T>, + changes_trie_state: Option<&'a ChangesTrieState<'a, H, N>>, parent_hash: H::Out, panic_on_storage_error: bool, cache: &mut StorageTransactionCache, ) -> Result, ()> where H::Out: Ord + Encode + 'static { - build_changes_trie::<_, T, H, N>( + build_changes_trie::<_, H, N>( backend, - changes_trie_storage, + changes_trie_state, self, parent_hash, panic_on_storage_error, @@ -643,8 +643,8 @@ impl OverlayedChanges { } #[cfg(test)] -impl From>> for OverlayedValue { - fn from(value: Option>) -> OverlayedValue { +impl From> for OverlayedValue { + fn from(value: Option) -> OverlayedValue { OverlayedValue { value, ..Default::default() } } } @@ -656,12 +656,11 @@ mod tests { Blake2Hasher, traits::Externalities, storage::well_known_keys::EXTRINSIC_INDEX, }; use crate::InMemoryBackend; - use crate::changes_trie::InMemoryStorage as InMemoryChangesTrieStorage; use crate::ext::Ext; use super::*; - fn strip_extrinsic_index(map: &BTreeMap, OverlayedValue>) - -> BTreeMap, OverlayedValue> + fn strip_extrinsic_index(map: &BTreeMap) + -> BTreeMap { let mut clone = map.clone(); clone.remove(&EXTRINSIC_INDEX.to_vec()); @@ -718,13 +717,12 @@ mod tests { ..Default::default() }; - let changes_trie_storage = InMemoryChangesTrieStorage::::new(); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new( &mut overlay, &mut cache, &backend, - Some(&changes_trie_storage), + crate::changes_trie::disabled_state::<_, u64>(), None, ); const ROOT: [u8; 32] = hex!("39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa"); @@ -732,57 +730,10 @@ mod tests { assert_eq!(&ext.storage_root()[..], &ROOT); } - #[test] - fn changes_trie_configuration_is_saved() { - let mut overlay = OverlayedChanges::default(); - assert!(overlay.changes_trie_config.is_none()); - assert_eq!( - 
overlay.set_changes_trie_config( - ChangesTrieConfig { digest_interval: 4, digest_levels: 1, }, - ), - true, - ); - assert!(overlay.changes_trie_config.is_some()); - } - - #[test] - fn changes_trie_configuration_is_saved_twice() { - let mut overlay = OverlayedChanges::default(); - assert!(overlay.changes_trie_config.is_none()); - assert_eq!(overlay.set_changes_trie_config(ChangesTrieConfig { - digest_interval: 4, digest_levels: 1, - }), true); - overlay.set_extrinsic_index(0); - overlay.set_storage(vec![1], Some(vec![2])); - assert_eq!(overlay.set_changes_trie_config(ChangesTrieConfig { - digest_interval: 4, digest_levels: 1, - }), true); - assert_eq!( - strip_extrinsic_index(&overlay.prospective.top), - vec![ - (vec![1], OverlayedValue { value: Some(vec![2]), - extrinsics: Some(vec![0].into_iter().collect()) }), - ].into_iter().collect(), - ); - } - - #[test] - fn panics_when_trying_to_save_different_changes_trie_configuration() { - let mut overlay = OverlayedChanges::default(); - assert_eq!(overlay.set_changes_trie_config(ChangesTrieConfig { - digest_interval: 4, digest_levels: 1, - }), true); - assert_eq!(overlay.set_changes_trie_config(ChangesTrieConfig { - digest_interval: 2, digest_levels: 1, - }), false); - } - #[test] fn extrinsic_changes_are_collected() { let mut overlay = OverlayedChanges::default(); - let _ = overlay.set_changes_trie_config(ChangesTrieConfig { - digest_interval: 4, digest_levels: 1, - }); + overlay.set_collect_extrinsics(true); overlay.set_storage(vec![100], Some(vec![101])); diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 1baf6d6b4da70..39a34509b720b 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -17,12 +17,16 @@ //! Test implementation for Externalities. 
use std::any::{Any, TypeId}; +use codec::Decode; use hash_db::Hasher; use crate::{ backend::Backend, OverlayedChanges, StorageTransactionCache, ext::Ext, InMemoryBackend, + StorageKey, StorageValue, changes_trie::{ + Configuration as ChangesTrieConfiguration, InMemoryStorage as ChangesTrieInMemoryStorage, BlockNumber as ChangesTrieBlockNumber, + State as ChangesTrieState, }, }; use sp_core::{ @@ -45,6 +49,7 @@ where as Backend>::Transaction, H, N >, backend: InMemoryBackend, + changes_trie_config: Option, changes_trie_storage: ChangesTrieInMemoryStorage, extensions: Extensions, } @@ -54,12 +59,19 @@ impl TestExternalities H::Out: Ord + 'static + codec::Codec { /// Get externalities implementation. - pub fn ext(&mut self) -> Ext, ChangesTrieInMemoryStorage> { + pub fn ext(&mut self) -> Ext> { Ext::new( &mut self.overlay, &mut self.storage_transaction_cache, &self.backend, - Some(&self.changes_trie_storage), + match self.changes_trie_config.clone() { + Some(config) => Some(ChangesTrieState { + config, + zero: 0.into(), + storage: &self.changes_trie_storage, + }), + None => None, + }, Some(&mut self.extensions), ) } @@ -72,21 +84,19 @@ impl TestExternalities /// Create a new instance of `TestExternalities` with code and storage. 
pub fn new_with_code(code: &[u8], mut storage: Storage) -> Self { let mut overlay = OverlayedChanges::default(); + let changes_trie_config = storage.top.get(CHANGES_TRIE_CONFIG) + .and_then(|v| Decode::decode(&mut &v[..]).ok()); + overlay.set_collect_extrinsics(changes_trie_config.is_some()); assert!(storage.top.keys().all(|key| !is_child_storage_key(key))); assert!(storage.children.keys().all(|key| is_child_storage_key(key))); - super::set_changes_trie_config( - &mut overlay, - storage.top.get(&CHANGES_TRIE_CONFIG.to_vec()).cloned(), - false, - ).expect("changes trie configuration is correct in test env; qed"); - storage.top.insert(HEAP_PAGES.to_vec(), 8u64.encode()); storage.top.insert(CODE.to_vec(), code.to_vec()); TestExternalities { overlay, + changes_trie_config, changes_trie_storage: ChangesTrieInMemoryStorage::new(), backend: storage.into(), extensions: Default::default(), @@ -95,7 +105,7 @@ impl TestExternalities } /// Insert key/value into backend - pub fn insert(&mut self, k: Vec, v: Vec) { + pub fn insert(&mut self, k: StorageKey, v: StorageValue) { self.backend = self.backend.update(vec![(None, vec![(k, Some(v))])]); } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 1920ea40a88b5..dbaae323c09f2 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -20,10 +20,12 @@ use log::{warn, debug}; use hash_db::Hasher; use sp_trie::{Trie, delta_trie_root, default_child_trie_root, child_delta_trie_root}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use crate::trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral}; -use crate::Backend; use sp_core::storage::ChildInfo; use codec::{Codec, Decode}; +use crate::{ + StorageKey, StorageValue, Backend, + trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral}, +}; /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. 
pub struct TrieBackend, H: Hasher> { @@ -72,7 +74,7 @@ impl, H: Hasher> Backend for TrieBackend where type Transaction = S::Overlay; type TrieBackendStorage = S; - fn storage(&self, key: &[u8]) -> Result>, Self::Error> { + fn storage(&self, key: &[u8]) -> Result, Self::Error> { self.essence.storage(key) } @@ -81,11 +83,11 @@ impl, H: Hasher> Backend for TrieBackend where storage_key: &[u8], child_info: ChildInfo, key: &[u8], - ) -> Result>, Self::Error> { + ) -> Result, Self::Error> { self.essence.child_storage(storage_key, child_info, key) } - fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { + fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { self.essence.next_storage_key(key) } @@ -94,7 +96,7 @@ impl, H: Hasher> Backend for TrieBackend where storage_key: &[u8], child_info: ChildInfo, key: &[u8], - ) -> Result>, Self::Error> { + ) -> Result, Self::Error> { self.essence.next_child_storage_key(storage_key, child_info, key) } @@ -125,7 +127,7 @@ impl, H: Hasher> Backend for TrieBackend where self.essence.for_child_keys_with_prefix(storage_key, child_info, prefix, f) } - fn pairs(&self) -> Vec<(Vec, Vec)> { + fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { let mut read_overlay = S::Overlay::default(); let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay); @@ -149,7 +151,7 @@ impl, H: Hasher> Backend for TrieBackend where } } - fn keys(&self, prefix: &[u8]) -> Vec> { + fn keys(&self, prefix: &[u8]) -> Vec { let mut read_overlay = S::Overlay::default(); let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay); @@ -170,7 +172,7 @@ impl, H: Hasher> Backend for TrieBackend where } fn storage_root(&self, delta: I) -> (H::Out, S::Overlay) - where I: IntoIterator, Option>)> + where I: IntoIterator)> { let mut write_overlay = S::Overlay::default(); let mut root = *self.essence.root(); @@ -197,7 +199,7 @@ impl, H: Hasher> Backend for TrieBackend where delta: I, ) -> (H::Out, bool, Self::Transaction) 
where - I: IntoIterator, Option>)>, + I: IntoIterator)>, H::Out: Ord, { let default_root = default_child_trie_root::>(storage_key); diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index f7f5006f41dab..2598682ae0668 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -25,7 +25,7 @@ use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, default_child_trie_root, read_trie_value, read_child_trie_value, for_keys_in_child_trie, KeySpacedDB}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use crate::backend::Consolidate; +use crate::{backend::Consolidate, StorageKey, StorageValue}; use sp_core::storage::ChildInfo; use codec::Encode; @@ -67,7 +67,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Return the next key in the trie i.e. the minimum key that is strictly superior to `key` in /// lexicographic order. - pub fn next_storage_key(&self, key: &[u8]) -> Result>, String> { + pub fn next_storage_key(&self, key: &[u8]) -> Result, String> { self.next_storage_key_from_root(&self.root, None, key) } @@ -78,7 +78,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: storage_key: &[u8], child_info: ChildInfo, key: &[u8], - ) -> Result>, String> { + ) -> Result, String> { let child_root = match self.storage(storage_key)? { Some(child_root) => child_root, None => return Ok(None), @@ -101,7 +101,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: root: &H::Out, child_info: Option, key: &[u8], - ) -> Result>, String> { + ) -> Result, String> { let mut read_overlay = S::Overlay::default(); let eph = Ephemeral { storage: &self.storage, @@ -146,7 +146,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: } /// Get the value of storage at given key. 
- pub fn storage(&self, key: &[u8]) -> Result>, String> { + pub fn storage(&self, key: &[u8]) -> Result, String> { let mut read_overlay = S::Overlay::default(); let eph = Ephemeral { storage: &self.storage, @@ -164,7 +164,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: storage_key: &[u8], child_info: ChildInfo, key: &[u8], - ) -> Result>, String> { + ) -> Result, String> { let root = self.storage(storage_key)? .unwrap_or(default_child_trie_root::>(storage_key).encode()); diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 78e32e33334df..a78a26db736c4 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -15,13 +15,13 @@ harness = false codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false } sp-std = { version = "2.0.0", default-features = false, path = "../std" } hash-db = { version = "0.15.2", default-features = false } -trie-db = { version = "0.18.1", default-features = false } +trie-db = { version = "0.19.2", default-features = false } trie-root = { version = "0.15.2", default-features = false } -memory-db = { version = "0.18.0", default-features = false } +memory-db = { version = "0.18.1", default-features = false } sp-core = { version = "2.0.0", default-features = false, path = "../core" } [dev-dependencies] -trie-bench = "0.18.0" +trie-bench = "0.19.0" trie-standardmap = "0.15.2" criterion = "0.2.11" hex-literal = "0.2.1" diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 4ca26542dd4bb..c71d3fb84ce79 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -27,6 +27,8 @@ use sp_std::boxed::Box; use sp_std::marker::PhantomData; use sp_std::vec::Vec; use hash_db::{Hasher, Prefix}; +use trie_db::proof::{generate_proof, verify_proof}; +pub use trie_db::proof::VerifyError; /// Our `NodeCodec`-specific error. pub use error::Error; /// The Substrate format implementation of `TrieStream`. 
@@ -119,6 +121,47 @@ pub mod trie_types { pub type TrieError = trie_db::TrieError; } +/// Create a proof for a subset of keys in a trie. +/// +/// The `keys` may contain any set of keys regardless of each one of them is included +/// in the `db`. +/// +/// For a key `K` that is included in the `db` a proof of inclusion is generated. +/// For a key `K` that is not included in the `db` a proof of non-inclusion is generated. +/// These can be later checked in `verify_trie_proof`. +pub fn generate_trie_proof<'a, L: TrieConfiguration, I, K, DB>( + db: &DB, + root: TrieHash, + keys: I, +) -> Result>, Box>> where + I: IntoIterator, + K: 'a + AsRef<[u8]>, + DB: hash_db::HashDBRef, +{ + let trie = TrieDB::::new(db, &root)?; + generate_proof(&trie, keys) +} + +/// Verify a set of key-value pairs against a trie root and a proof. +/// +/// Checks a set of keys with optional values for inclusion in the proof that was generated by +/// `generate_trie_proof`. +/// If the value in the pair is supplied (`(key, Some(value))`), this key-value pair will be +/// checked for inclusion in the proof. +/// If the value is omitted (`(key, None)`), this key will be checked for non-inclusion in the +/// proof. +pub fn verify_trie_proof<'a, L: TrieConfiguration, I, K, V>( + root: &TrieHash, + proof: &[Vec], + items: I, +) -> Result<(), VerifyError, error::Error>> where + I: IntoIterator)>, + K: 'a + AsRef<[u8]>, + V: 'a + AsRef<[u8]>, +{ + verify_proof::, _, _, _>(root, proof, items) +} + /// Determine a trie root given a hash DB and delta values. 
pub fn delta_trie_root( db: &mut DB, @@ -727,4 +770,89 @@ mod tests { assert_eq!(pairs, iter_pairs); } + + #[test] + fn proof_non_inclusion_works() { + let pairs = vec![ + (hex!("0102").to_vec(), hex!("01").to_vec()), + (hex!("0203").to_vec(), hex!("0405").to_vec()), + ]; + + let mut memdb = MemoryDB::default(); + let mut root = Default::default(); + populate_trie::(&mut memdb, &mut root, &pairs); + + let non_included_key: Vec = hex!("0909").to_vec(); + let proof = generate_trie_proof::( + &memdb, + root, + &[non_included_key.clone()] + ).unwrap(); + + // Verifying that the K was not included into the trie should work. + assert!(verify_trie_proof::>( + &root, + &proof, + &[(non_included_key.clone(), None)], + ).is_ok() + ); + + // Verifying that the K was included into the trie should fail. + assert!(verify_trie_proof::>( + &root, + &proof, + &[(non_included_key, Some(hex!("1010").to_vec()))], + ).is_err() + ); + } + + #[test] + fn proof_inclusion_works() { + let pairs = vec![ + (hex!("0102").to_vec(), hex!("01").to_vec()), + (hex!("0203").to_vec(), hex!("0405").to_vec()), + ]; + + let mut memdb = MemoryDB::default(); + let mut root = Default::default(); + populate_trie::(&mut memdb, &mut root, &pairs); + + let proof = generate_trie_proof::( + &memdb, + root, + &[pairs[0].0.clone()] + ).unwrap(); + + // Check that a K, V included into the proof are verified. + assert!(verify_trie_proof::( + &root, + &proof, + &[(pairs[0].0.clone(), Some(pairs[0].1.clone()))] + ).is_ok() + ); + + // Absence of the V is not verified with the proof that has K, V included. + assert!(verify_trie_proof::>( + &root, + &proof, + &[(pairs[0].0.clone(), None)] + ).is_err() + ); + + // K not included into the trie is not verified. + assert!(verify_trie_proof::( + &root, + &proof, + &[(hex!("4242").to_vec(), Some(pairs[0].1.clone()))] + ).is_err() + ); + + // K included into the trie but not included into the proof is not verified. 
+ assert!(verify_trie_proof::( + &root, + &proof, + &[(pairs[1].0.clone(), Some(pairs[1].1.clone()))] + ).is_err() + ); + } } diff --git a/primitives/version/Cargo.toml b/primitives/version/Cargo.toml index fbbf0cfa94607..784051b56e5e1 100644 --- a/primitives/version/Cargo.toml +++ b/primitives/version/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" [dependencies] impl-serde = { version = "0.2.3", optional = true } serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.0.5", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.1.2", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0", default-features = false, path = "../std" } sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } diff --git a/primitives/version/src/lib.rs b/primitives/version/src/lib.rs index 3211c46253631..0534f87490868 100644 --- a/primitives/version/src/lib.rs +++ b/primitives/version/src/lib.rs @@ -25,11 +25,11 @@ use std::fmt; #[cfg(feature = "std")] use std::collections::HashSet; -use codec::Encode; -#[cfg(feature = "std")] -use codec::Decode; +use codec::{Encode, Decode}; use sp_runtime::RuntimeString; pub use sp_runtime::create_runtime_str; +#[doc(hidden)] +pub use sp_std; #[cfg(feature = "std")] use sp_runtime::{traits::Block as BlockT, generic::BlockId}; @@ -37,25 +37,13 @@ use sp_runtime::{traits::Block as BlockT, generic::BlockId}; /// The identity of a particular API interface that the runtime might provide. pub type ApiId = [u8; 8]; -/// A vector of pairs of `ApiId` and a `u32` for version. For `"std"` builds, this -/// is a `Cow`. -#[cfg(feature = "std")] -pub type ApisVec = std::borrow::Cow<'static, [(ApiId, u32)]>; -/// A vector of pairs of `ApiId` and a `u32` for version. For `"no-std"` builds, this -/// is just a reference. 
-#[cfg(not(feature = "std"))] -pub type ApisVec = &'static [(ApiId, u32)]; +/// A vector of pairs of `ApiId` and a `u32` for version. +pub type ApisVec = sp_std::borrow::Cow<'static, [(ApiId, u32)]>; /// Create a vector of Api declarations. #[macro_export] -#[cfg(feature = "std")] -macro_rules! create_apis_vec { - ( $y:expr ) => { std::borrow::Cow::Borrowed(& $y) } -} -#[macro_export] -#[cfg(not(feature = "std"))] macro_rules! create_apis_vec { - ( $y:expr ) => { & $y } + ( $y:expr ) => { $crate::sp_std::borrow::Cow::Borrowed(& $y) } } /// Runtime version. @@ -63,8 +51,8 @@ macro_rules! create_apis_vec { /// This triplet have different semantics and mis-interpretation could cause problems. /// In particular: bug fixes should result in an increment of `spec_version` and possibly `authoring_version`, /// absolutely not `impl_version` since they change the semantics of the runtime. -#[derive(Clone, PartialEq, Eq, Encode, Default, sp_runtime::RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Decode))] +#[derive(Clone, PartialEq, Eq, Encode, Decode, Default, sp_runtime::RuntimeDebug)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] pub struct RuntimeVersion { /// Identifies the different Substrate runtimes. There'll be at least polkadot and node. diff --git a/primitives/wasm-interface/src/lib.rs b/primitives/wasm-interface/src/lib.rs index bc34bfda9700b..b0bbbf82db3c1 100644 --- a/primitives/wasm-interface/src/lib.rs +++ b/primitives/wasm-interface/src/lib.rs @@ -39,13 +39,17 @@ pub enum ValueType { /// Values supported by Substrate on the boundary between host/Wasm. #[derive(PartialEq, Debug, Clone, Copy)] pub enum Value { - /// An `i32` value. + /// A 32-bit integer. I32(i32), - /// An `i64` value. + /// A 64-bit integer. I64(i64), - /// An nan-preserving `f32` value. + /// A 32-bit floating-point number stored as raw bit pattern. 
+ /// + /// You can materialize this value using `f32::from_bits`. F32(u32), - /// An nan-preserving `f64` value. + /// A 64-bit floating-point number stored as raw bit pattern. + /// + /// You can materialize this value using `f64::from_bits`. F64(u64), } diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml index 05dc41b2233a1..8b1ebc39f4724 100644 --- a/test-utils/client/Cargo.toml +++ b/test-utils/client/Cargo.toml @@ -6,10 +6,10 @@ edition = "2018" [dependencies] sc-client-api = { version = "2.0.0", path = "../../client/api" } -sc-client = { version = "2.0.0", path = "../../client/" } -sc-client-db = { version = "2.0.0", features = ["test-helpers"], path = "../../client/db" } +sc-client = { version = "0.8", path = "../../client/" } +sc-client-db = { version = "0.8", features = ["test-helpers"], path = "../../client/db" } sp-consensus = { version = "0.8", path = "../../primitives/consensus/common" } -sc-executor = { version = "2.0.0", path = "../../client/executor" } +sc-executor = { version = "0.8", path = "../../client/executor" } futures = "0.3.1" hash-db = "0.15.2" sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } @@ -17,4 +17,4 @@ codec = { package = "parity-scale-codec", version = "1.0.0" } sp-core = { version = "2.0.0", path = "../../primitives/core" } sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sp-state-machine = { version = "2.0.0", path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.8", path = "../../primitives/state-machine" } diff --git a/test-utils/client/src/client_ext.rs b/test-utils/client/src/client_ext.rs index 67d08c5fa600f..aa7383e3ab367 100644 --- a/test-utils/client/src/client_ext.rs +++ b/test-utils/client/src/client_ext.rs @@ -64,7 +64,7 @@ pub trait ClientBlockImportExt: Sized { impl ClientExt for Client where B: sc_client_api::backend::Backend, - E: 
sc_client::CallExecutor, + E: sc_client::CallExecutor + 'static, Self: BlockImport, Block: BlockT, { diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index ac1cfe4d6b6a5..8603b26d50a58 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -165,7 +165,7 @@ impl TestClientBuilder >, sc_client::LongestChain, ) where - Executor: sc_client::CallExecutor, + Executor: sc_client::CallExecutor + 'static, Backend: sc_client_api::backend::Backend, Block: BlockT, { @@ -190,7 +190,7 @@ impl TestClientBuilder let client = sc_client::Client::new( self.backend.clone(), executor, - storage, + &storage, Default::default(), Default::default(), ExecutionExtensions::new( @@ -224,8 +224,8 @@ impl TestClientBuilder< sc_client::LongestChain, ) where I: Into>>, - E: sc_executor::NativeExecutionDispatch, - Backend: sc_client_api::backend::Backend, + E: sc_executor::NativeExecutionDispatch + 'static, + Backend: sc_client_api::backend::Backend + 'static, Block: BlockT, { let executor = executor.into().unwrap_or_else(|| diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index 6444c7fcf1463..b827163a1b0f7 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -32,15 +32,15 @@ pallet-babe = { version = "2.0.0", default-features = false, path = "../../frame frame-system = { version = "2.0.0", default-features = false, path = "../../frame/system" } frame-system-rpc-runtime-api = { version = "2.0.0", default-features = false, path = "../../frame/system/rpc/runtime-api" } pallet-timestamp = { version = "2.0.0", default-features = false, path = "../../frame/timestamp" } -sc-client = { version = "2.0.0", optional = true, path = "../../client" } +sc-client = { version = "0.8", optional = true, path = "../../client" } sp-trie = { version = "2.0.0", default-features = false, path = "../../primitives/trie" } sp-transaction-pool = { version = "2.0.0", default-features = false, path = 
"../../primitives/transaction-pool" } -trie-db = { version = "0.18.1", default-features = false } +trie-db = { version = "0.19.2", default-features = false } [dev-dependencies] -sc-executor = { version = "2.0.0", path = "../../client/executor" } +sc-executor = { version = "0.8", path = "../../client/executor" } substrate-test-runtime-client = { version = "2.0.0", path = "./client" } -sp-state-machine = { version = "2.0.0", path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.8", path = "../../primitives/state-machine" } [build-dependencies] wasm-builder-runner = { version = "1.0.4", package = "substrate-wasm-builder-runner", path = "../../utils/wasm-builder-runner" } diff --git a/test-utils/runtime/client/Cargo.toml b/test-utils/runtime/client/Cargo.toml index 8e08f6a1b5ec8..aa33f550127e8 100644 --- a/test-utils/runtime/client/Cargo.toml +++ b/test-utils/runtime/client/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] -sc-block-builder = { version = "2.0.0", path = "../../../client/block-builder" } +sc-block-builder = { version = "0.8", path = "../../../client/block-builder" } substrate-test-client = { version = "2.0.0", path = "../../client" } sp-core = { version = "2.0.0", path = "../../../primitives/core" } substrate-test-runtime = { version = "2.0.0", path = "../../runtime" } @@ -14,5 +14,5 @@ sp-api = { version = "2.0.0", path = "../../../primitives/api" } sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } codec = { package = "parity-scale-codec", version = "1.0.0" } sc-client-api = { version = "2.0.0", path = "../../../client/api" } -sc-client = { version = "2.0.0", path = "../../../client/" } +sc-client = { version = "0.8", path = "../../../client/" } futures = "0.3.1" diff --git a/test-utils/runtime/client/src/block_builder_ext.rs b/test-utils/runtime/client/src/block_builder_ext.rs index 7626ac2ffa5f4..6b9a6f79ab191 100644 --- 
a/test-utils/runtime/client/src/block_builder_ext.rs +++ b/test-utils/runtime/client/src/block_builder_ext.rs @@ -17,6 +17,7 @@ //! Block Builder extensions for tests. use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_core::ChangesTrieConfiguration; use sc_client_api::backend; use sp_runtime::traits::HasherFor; @@ -32,6 +33,11 @@ pub trait BlockBuilderExt { key: Vec, value: Option>, ) -> Result<(), sp_blockchain::Error>; + /// Add changes trie configuration update extrinsic to the block. + fn push_changes_trie_configuration_update( + &mut self, + new_config: Option, + ) -> Result<(), sp_blockchain::Error>; } impl<'a, A, B> BlockBuilderExt for sc_block_builder::BlockBuilder<'a, substrate_test_runtime::Block, A, B> where @@ -57,4 +63,11 @@ impl<'a, A, B> BlockBuilderExt for sc_block_builder::BlockBuilder<'a, substrate_ ) -> Result<(), sp_blockchain::Error> { self.push(substrate_test_runtime::Extrinsic::StorageChange(key, value)) } + + fn push_changes_trie_configuration_update( + &mut self, + new_config: Option, + ) -> Result<(), sp_blockchain::Error> { + self.push(substrate_test_runtime::Extrinsic::ChangesTrieConfigUpdate(new_config)) + } } diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index 4faf7f8ab4851..7646f4a96023c 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -30,7 +30,7 @@ pub use sc_client::LongestChain; pub use self::block_builder_ext::BlockBuilderExt; -use sp_core::sr25519; +use sp_core::{sr25519, ChangesTrieConfiguration}; use sp_core::storage::{ChildInfo, Storage, StorageChild}; use substrate_test_runtime::genesismap::{GenesisConfig, additional_storage_with_genesis}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Hash as HashT, NumberFor, HasherFor}; @@ -42,7 +42,6 @@ use sc_client::{ }, }; - /// A prelude to import in tests. 
pub mod prelude { // Trait extensions @@ -59,21 +58,12 @@ pub mod prelude { pub use super::{AccountKeyring, Sr25519Keyring}; } -mod local_executor { - #![allow(missing_docs)] - use substrate_test_runtime; - use crate::sc_executor::native_executor_instance; - // FIXME #1576 change the macro and pass in the `BlakeHasher` that dispatch needs from here instead - native_executor_instance!( - pub LocalExecutor, - substrate_test_runtime::api::dispatch, - substrate_test_runtime::native_version - ); +sc_executor::native_executor_instance! { + pub LocalExecutor, + substrate_test_runtime::api::dispatch, + substrate_test_runtime::native_version, } -/// Native executor used for tests. -pub use self::local_executor::LocalExecutor; - /// Test client database backend. pub type Backend = substrate_test_client::Backend; @@ -101,7 +91,7 @@ pub type LightExecutor = sc_client::light::call_executor::GenesisCallExecutor< /// Parameters of test-client builder with test-runtime. #[derive(Default)] pub struct GenesisParameters { - support_changes_trie: bool, + changes_trie_config: Option, heap_pages_override: Option, extra_storage: Storage, } @@ -109,7 +99,7 @@ pub struct GenesisParameters { impl GenesisParameters { fn genesis_config(&self) -> GenesisConfig { GenesisConfig::new( - self.support_changes_trie, + self.changes_trie_config.clone(), vec![ sr25519::Public::from(Sr25519Keyring::Alice).into(), sr25519::Public::from(Sr25519Keyring::Bob).into(), @@ -180,9 +170,9 @@ pub trait TestClientBuilderExt: Sized { /// Returns a mutable reference to the genesis parameters. fn genesis_init_mut(&mut self) -> &mut GenesisParameters; - /// Enable or disable support for changes trie in genesis. - fn set_support_changes_trie(mut self, support_changes_trie: bool) -> Self { - self.genesis_init_mut().support_changes_trie = support_changes_trie; + /// Set changes trie configuration for genesis. 
+ fn changes_trie_config(mut self, config: Option) -> Self { + self.genesis_init_mut().changes_trie_config = config; self } @@ -245,7 +235,7 @@ impl TestClientBuilderExt for TestClientBuilder< sc_client::LocalCallExecutor>, B > where - B: sc_client_api::backend::Backend, + B: sc_client_api::backend::Backend + 'static, // Rust bug: https://github.com/rust-lang/rust/issues/24159 >::State: sp_api::StateBackend>, @@ -353,7 +343,7 @@ pub fn new_light() -> ( let storage = sc_client_db::light::LightStorage::new_test(); let blockchain = Arc::new(sc_client::light::blockchain::Blockchain::new(storage)); let backend = Arc::new(LightBackend::new(blockchain.clone())); - let executor = NativeExecutor::new(WasmExecutionMethod::Interpreted, None); + let executor = new_native_executor(); let local_call_executor = sc_client::LocalCallExecutor::new(backend.clone(), executor); let call_executor = LightExecutor::new( backend.clone(), @@ -372,3 +362,8 @@ pub fn new_light() -> ( pub fn new_light_fetcher() -> LightFetcher { LightFetcher::default() } + +/// Create a new native executor. 
+pub fn new_native_executor() -> sc_executor::NativeExecutor { + sc_executor::NativeExecutor::new(sc_executor::WasmExecutionMethod::Interpreted, None) +} diff --git a/test-utils/runtime/src/genesismap.rs b/test-utils/runtime/src/genesismap.rs index f6fbcddf44e16..25d9a807ccee1 100644 --- a/test-utils/runtime/src/genesismap.rs +++ b/test-utils/runtime/src/genesismap.rs @@ -36,7 +36,7 @@ pub struct GenesisConfig { impl GenesisConfig { pub fn new( - support_changes_trie: bool, + changes_trie_config: Option, authorities: Vec, endowed_accounts: Vec, balance: u64, @@ -44,10 +44,7 @@ impl GenesisConfig { extra_storage: Storage, ) -> Self { GenesisConfig { - changes_trie_config: match support_changes_trie { - true => Some(super::changes_trie_config()), - false => None, - }, + changes_trie_config, authorities: authorities.clone(), balances: endowed_accounts.into_iter().map(|a| (a, balance)).collect(), heap_pages_override, diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 4dfddfda21aec..e68ed5f6fc17e 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -25,7 +25,7 @@ pub mod system; use sp_std::{prelude::*, marker::PhantomData}; use codec::{Encode, Decode, Input, Error}; -use sp_core::{Blake2Hasher, OpaqueMetadata, RuntimeDebug}; +use sp_core::{Blake2Hasher, OpaqueMetadata, RuntimeDebug, ChangesTrieConfiguration}; use sp_application_crypto::{ed25519, sr25519, RuntimeAppPublic}; use trie_db::{TrieMut, Trie}; use sp_trie::PrefixedMemoryDB; @@ -65,7 +65,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_name: create_runtime_str!("parity-test"), authoring_version: 1, spec_version: 1, + #[cfg(feature = "std")] impl_version: 1, + #[cfg(not(feature = "std"))] + impl_version: 2, apis: RUNTIME_API_VERSIONS, }; @@ -108,6 +111,7 @@ pub enum Extrinsic { Transfer(Transfer, AccountSignature), IncludeData(Vec), StorageChange(Vec, Option>), + ChangesTrieConfigUpdate(Option), } #[cfg(feature = "std")] @@ -132,6 +136,8 
@@ impl BlindCheckable for Extrinsic { }, Extrinsic::IncludeData(_) => Err(InvalidTransaction::BadProof.into()), Extrinsic::StorageChange(key, value) => Ok(Extrinsic::StorageChange(key, value)), + Extrinsic::ChangesTrieConfigUpdate(new_config) => + Ok(Extrinsic::ChangesTrieConfigUpdate(new_config)), } } } @@ -193,14 +199,6 @@ pub fn run_tests(mut input: &[u8]) -> Vec { [stxs.len() as u8].encode() } -/// Changes trie configuration (optionally) used in tests. -pub fn changes_trie_config() -> sp_core::ChangesTrieConfiguration { - sp_core::ChangesTrieConfiguration { - digest_interval: 4, - digest_levels: 2, - } -} - /// A type that can not be decoded. #[derive(PartialEq)] pub struct DecodeFails { diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index b04ebc1cf2f56..4d12ab8437d5b 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -35,7 +35,7 @@ use frame_system::Trait; use crate::{ AccountId, BlockNumber, Extrinsic, Transfer, H256 as Hash, Block, Header, Digest, AuthorityId }; -use sp_core::storage::well_known_keys; +use sp_core::{storage::well_known_keys, ChangesTrieConfiguration}; const NONCE_OF: &[u8] = b"nonce:"; const BALANCE_OF: &[u8] = b"balance:"; @@ -51,6 +51,7 @@ decl_storage! { Number get(fn number): Option; ParentHash get(fn parent_hash): Hash; NewAuthorities get(fn new_authorities): Option>; + NewChangesTrieConfig get(fn new_changes_trie_config): Option>; StorageDigest get(fn storage_digest): Option; Authorities get(fn authorities) config(): Vec; } @@ -206,6 +207,8 @@ pub fn finalize_block() -> Header { let mut digest = ::take().expect("StorageDigest is set by `initialize_block`"); let o_new_authorities = ::take(); + let new_changes_trie_config = ::take(); + // This MUST come after all changes to storage are done. Otherwise we will fail the // “Storage root does not match that calculated” assertion. 
let storage_root = Hash::decode(&mut &storage_root()[..]) @@ -222,6 +225,12 @@ pub fn finalize_block() -> Header { digest.push(generic::DigestItem::Consensus(*b"babe", new_authorities.encode())); } + if let Some(new_config) = new_changes_trie_config { + digest.push(generic::DigestItem::ChangesTrieSignal( + generic::ChangesTrieSignal::NewConfiguration(new_config) + )); + } + Header { number, extrinsics_root, @@ -244,6 +253,8 @@ fn execute_transaction_backend(utx: &Extrinsic) -> ApplyExtrinsicResult { Extrinsic::AuthoritiesChange(ref new_auth) => execute_new_authorities_backend(new_auth), Extrinsic::IncludeData(_) => Ok(Ok(())), Extrinsic::StorageChange(key, value) => execute_storage_change(key, value.as_ref().map(|v| &**v)), + Extrinsic::ChangesTrieConfigUpdate(ref new_config) => + execute_changes_trie_config_update(new_config.clone()), } } @@ -286,6 +297,18 @@ fn execute_storage_change(key: &[u8], value: Option<&[u8]>) -> ApplyExtrinsicRes Ok(Ok(())) } +fn execute_changes_trie_config_update(new_config: Option) -> ApplyExtrinsicResult { + match new_config.clone() { + Some(new_config) => storage::unhashed::put_raw( + well_known_keys::CHANGES_TRIE_CONFIG, + &new_config.encode(), + ), + None => storage::unhashed::kill(well_known_keys::CHANGES_TRIE_CONFIG), + } + ::put(new_config); + Ok(Ok(())) +} + #[cfg(feature = "std")] fn info_expect_equal_hash(given: &Hash, expected: &Hash) { use sp_core::hexdisplay::HexDisplay; diff --git a/utils/browser/Cargo.toml b/utils/browser/Cargo.toml index b58774075b34b..cfceaa143c7be 100644 --- a/utils/browser/Cargo.toml +++ b/utils/browser/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "browser-utils" -version = "2.0.0" +version = "0.8.0" authors = ["Parity Technologies "] description = "Utilities for creating a browser light-client." 
edition = "2018" @@ -16,7 +16,7 @@ js-sys = "0.3.34" wasm-bindgen = "0.2.57" wasm-bindgen-futures = "0.4.7" kvdb-web = "0.3" -service = { version = "2.0.0", package = "sc-service", path = "../../client/service", default-features = false } +service = { version = "0.8", package = "sc-service", path = "../../client/service", default-features = false } network = { package = "sc-network", path = "../../client/network" } chain-spec = { package = "sc-chain-spec", path = "../../client/chain-spec" } diff --git a/utils/browser/src/lib.rs b/utils/browser/src/lib.rs index e404c6612906c..0dbde57182766 100644 --- a/utils/browser/src/lib.rs +++ b/utils/browser/src/lib.rs @@ -22,10 +22,7 @@ use service::{ ChainSpec, RuntimeGenesis }; use wasm_bindgen::prelude::*; -use futures::{ - TryFutureExt as _, FutureExt as _, Stream as _, Future as _, TryStreamExt as _, - channel::{oneshot, mpsc}, future::{poll_fn, ok}, compat::*, -}; +use futures::{prelude::*, channel::{oneshot, mpsc}, future::{poll_fn, ok}, compat::*}; use std::task::Poll; use std::pin::Pin; use chain_spec::Extension; @@ -82,8 +79,7 @@ struct RpcMessage { } /// Create a Client object that connects to a service. -pub fn start_client(service: impl AbstractService) -> Client { - let mut service = service.compat(); +pub fn start_client(mut service: impl AbstractService) -> Client { // We dispatch a background task responsible for processing the service. 
// // The main action performed by the code below consists in polling the service with @@ -94,10 +90,8 @@ pub fn start_client(service: impl AbstractService) -> Client { loop { match Pin::new(&mut rpc_send_rx).poll_next(cx) { Poll::Ready(Some(message)) => { - let fut = service.get_ref() + let fut = service .rpc_query(&message.session, &message.rpc_json) - .compat() - .unwrap_or_else(|_| None) .boxed(); let _ = message.send_back.send(fut); }, diff --git a/utils/frame/rpc/support/Cargo.toml b/utils/frame/rpc/support/Cargo.toml index f26a69cc5fa0c..19451743cd268 100644 --- a/utils/frame/rpc/support/Cargo.toml +++ b/utils/frame/rpc/support/Cargo.toml @@ -12,7 +12,7 @@ codec = { package = "parity-scale-codec", version = "1" } serde = "1" frame-support = { version = "2.0.0", path = "../../../../frame/support" } sp-storage = { version = "2.0.0", path = "../../../../primitives/storage" } -sc-rpc-api = { version = "2.0.0", path = "../../../../client/rpc-api" } +sc-rpc-api = { version = "0.8", path = "../../../../client/rpc-api" } [dev-dependencies] frame-system = { version = "2.0.0", path = "../../../../frame/system" } diff --git a/utils/frame/rpc/system/Cargo.toml b/utils/frame/rpc/system/Cargo.toml index 1a479c54e57d5..39f7735eed682 100644 --- a/utils/frame/rpc/system/Cargo.toml +++ b/utils/frame/rpc/system/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] -sc-client = { version = "2.0.0", path = "../../../../client/" } +sc-client = { version = "0.8", path = "../../../../client/" } codec = { package = "parity-scale-codec", version = "1.0.0" } futures = "0.3.1" jsonrpc-core = "14.0.3" diff --git a/utils/grafana-data-source/src/database.rs b/utils/grafana-data-source/src/database.rs deleted file mode 100644 index f20917cf785d4..0000000000000 --- a/utils/grafana-data-source/src/database.rs +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. 
- -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -use std::collections::HashMap; -use std::convert::TryFrom; -use crate::Error; - -pub struct Database { - base_timestamp: i64, - storage: HashMap> -} - -impl Database { - /// Create a new Database. - pub fn new() -> Self { - Self { - base_timestamp: now_millis(), - storage: HashMap::new() - } - } - - /// Produce an iterator for keys starting with a base string. - pub fn keys_starting_with<'a>(&'a self, base: &'a str) -> impl Iterator + 'a { - self.storage.keys() - .filter(move |key| key.starts_with(base)) - .cloned() - } - - /// Select `max_datapoints` datapoints that have been added between `from` and `to`. - pub fn datapoints_between(&self, key: &str, from: i64, to: i64, max_datapoints: usize) -> Option> { - self.storage.get(key) - .map(|vec| { - let from = find_index(vec, self.base_timestamp, from); - let to = find_index(vec, self.base_timestamp, to); - let slice = &vec[from .. to]; - - if max_datapoints == 0 { - Vec::new() - } else if max_datapoints >= slice.len() { - // Just convert the slice as-is - slice.iter() - .map(|dp| dp.make_absolute(self.base_timestamp)) - .collect() - } else { - // We have more datapoints than we need, so we need to skip some - (0 .. 
max_datapoints - 1) - .map(|i| &slice[i * slice.len() / (max_datapoints - 1)]) - .chain(slice.last()) - .map(|dp| dp.make_absolute(self.base_timestamp)) - .collect() - } - }) - } - - /// Push a new datapoint. Will error if the base timestamp hasn't been updated in `2^32` - /// milliseconds (49 days). - pub fn push(&mut self, key: &str, value: f32) -> Result<(), Error> { - self.storage.entry(key.into()) - .or_insert_with(Vec::new) - .push(Datapoint::new(self.base_timestamp, value)?); - - Ok(()) - } - - /// Set a new base timestamp, and remove metrics older than this new timestamp. Errors if the - /// difference between timestamps is greater than `2^32` milliseconds (49 days). - pub fn truncate(&mut self, new_base_timestamp: i64) -> Result<(), Error> { - // Ensure that the new base is older. - if self.base_timestamp >= new_base_timestamp { - return Ok(()); - } - - // If the old base timestamp was too long ago, the - let delta = u32::try_from(new_base_timestamp - self.base_timestamp) - .map_err(Error::Timestamp)?; - - for metric in self.storage.values_mut() { - // Find the index of the oldest allowed timestamp and cut out all those before it. 
- let index = find_index(&metric, self.base_timestamp, new_base_timestamp); - - *metric = metric.iter_mut() - .skip(index) - .map(|dp| { - dp.delta_timestamp -= delta; - *dp - }) - .collect(); - } - - self.base_timestamp = new_base_timestamp; - - Ok(()) - } -} - -#[derive(Clone, Copy)] -struct Datapoint { - delta_timestamp: u32, - value: f32 -} - -impl Datapoint { - fn new(base_timestamp: i64, value: f32) -> Result { - Ok(Self { - delta_timestamp: u32::try_from(now_millis() - base_timestamp) - .map_err(Error::Timestamp)?, - value - }) - } - - fn make_absolute(self, base_timestamp: i64) -> (f32, i64) { - (self.value, base_timestamp + self.delta_timestamp as i64) - } -} - -fn find_index(slice: &[Datapoint], base_timestamp: i64, timestamp: i64) -> usize { - slice.binary_search_by_key(×tamp, |datapoint| { - base_timestamp + datapoint.delta_timestamp as i64 - }).unwrap_or_else(|index| index) -} - -/// Get the current unix timestamp in milliseconds. -fn now_millis() -> i64 { - chrono::Utc::now().timestamp_millis() -} - -#[test] -fn test() { - let mut database = Database::new(); - - database.push("test", 1.0).unwrap(); - database.push("test", 2.5).unwrap(); - database.push("test", 2.0).unwrap(); - database.push("test 2", 1.0).unwrap(); - - let mut keys: Vec<_> = database.keys_starting_with("test").collect(); - keys.sort(); - - assert_eq!(keys, ["test", "test 2"]); - assert_eq!(database.keys_starting_with("test ").collect::>(), ["test 2"]); -} diff --git a/utils/grafana-data-source/src/lib.rs b/utils/grafana-data-source/src/lib.rs deleted file mode 100644 index eacb0c59df38d..0000000000000 --- a/utils/grafana-data-source/src/lib.rs +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. 
- -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! [Grafana] data source server -//! -//! To display node statistics with [Grafana], this module exposes a `run_server` function that -//! starts up a HTTP server that conforms to the [`grafana-json-data-source`] API. The -//! `record_metrics` macro can be used to pass metrics to this server. -//! -//! [Grafana]: https://grafana.com/ -//! [`grafana-json-data-source`]: https://github.com/simPod/grafana-json-datasource - -#![warn(missing_docs)] - -use lazy_static::lazy_static; -use parking_lot::RwLock; - -mod types; -mod server; -#[cfg(not(target_os = "unknown"))] -pub mod networking; -mod database; - -use database::Database; -pub use server::run_server; -use std::num::TryFromIntError; - -lazy_static! { - // The `RwLock` wrapping the metrics database. - static ref DATABASE: RwLock = RwLock::new(Database::new()); -} - -/// Write metrics to `METRICS`. -#[macro_export] -macro_rules! record_metrics( - ($($key:expr => $value:expr,)*) => { - if cfg!(not(target_os = "unknown")) { - $crate::record_metrics_slice(&[ - $( ($key, $value as f32), )* - ]) - } else { - Ok(()) - } - } -); - -/// Write metrics to `METRICS` as a slice. Intended to be only used via `record_metrics!`. 
-pub fn record_metrics_slice(metrics: &[(&str, f32)]) -> Result<(), Error> { - let mut database = crate::DATABASE.write(); - - for &(key, value) in metrics.iter() { - database.push(key, value)?; - } - - Ok(()) -} - -/// Error type that can be returned by either `record_metrics` or `run_server`. -#[derive(Debug, derive_more::Display, derive_more::From)] -pub enum Error { - /// Hyper internal error. - Hyper(hyper::Error), - /// Serialization/deserialization error. - Serde(serde_json::Error), - /// Http request error. - Http(hyper::http::Error), - /// Timestamp error. - Timestamp(TryFromIntError), - /// i/o error. - Io(std::io::Error) -} - -impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::Hyper(error) => Some(error), - Error::Serde(error) => Some(error), - Error::Http(error) => Some(error), - Error::Timestamp(error) => Some(error), - Error::Io(error) => Some(error) - } - } -} diff --git a/utils/grafana-data-source/src/server.rs b/utils/grafana-data-source/src/server.rs deleted file mode 100644 index 8ef5e0378478c..0000000000000 --- a/utils/grafana-data-source/src/server.rs +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . 
- -use serde::{Serialize, de::DeserializeOwned}; -use hyper::{Body, Request, Response, header, service::{service_fn, make_service_fn}, Server}; -use chrono::{Duration, Utc}; -use futures_util::{FutureExt, TryStreamExt, future::{Future, select, Either}}; -use futures_timer::Delay; -use crate::{DATABASE, Error, types::{Target, Query, TimeseriesData, Range}}; - -async fn api_response(req: Request) -> Result, Error> { - match req.uri().path() { - "/search" => { - map_request_to_response(req, |target: Target| { - // Filter and return metrics relating to the target - DATABASE.read() - .keys_starting_with(&target.target) - .collect::>() - }).await - }, - "/query" => { - map_request_to_response(req, |query: Query| { - let metrics = DATABASE.read(); - - let Query { - range: Range { from, to }, - max_datapoints, .. - } = query; - - // Return timeseries data related to the specified metrics - query.targets.iter() - .map(|target| { - let datapoints = metrics.datapoints_between(&target.target, from, to, max_datapoints) - .unwrap_or_else(Vec::new); - - TimeseriesData { - target: target.target.clone(), datapoints - } - }) - .collect::>() - }).await - }, - _ => Ok(Response::new(Body::empty())), - } -} - -async fn map_request_to_response(req: Request, transformation: T) -> Result, Error> - where - Req: DeserializeOwned, - Res: Serialize, - T: Fn(Req) -> Res + Send + Sync + 'static -{ - let body = req.into_body() - .map_ok(|bytes| bytes.to_vec()) - .try_concat() - .await - .map_err(Error::Hyper)?; - - let req = serde_json::from_slice(body.as_ref()).map_err(Error::Serde)?; - let res = transformation(req); - let string = serde_json::to_string(&res).map_err(Error::Serde)?; - - Response::builder() - .header(header::CONTENT_TYPE, "application/json") - .body(Body::from(string)) - .map_err(Error::Http) -} - -/// Given that we're not using hyper's tokio feature, we need to define out own executor. 
-#[derive(Clone)] -pub struct Executor; - -#[cfg(not(target_os = "unknown"))] -impl hyper::rt::Executor for Executor - where - T: Future + Send + 'static, - T::Output: Send + 'static, -{ - fn execute(&self, future: T) { - async_std::task::spawn(future); - } -} - -/// Start the data source server. -#[cfg(not(target_os = "unknown"))] -pub async fn run_server(mut address: std::net::SocketAddr) -> Result<(), Error> { - use async_std::{net, io}; - use crate::networking::Incoming; - - let listener = loop { - let listener = net::TcpListener::bind(&address).await; - match listener { - Ok(listener) => { - log::info!("Grafana data source server started at {}", address); - break listener - }, - Err(err) => match err.kind() { - io::ErrorKind::AddrInUse | io::ErrorKind::PermissionDenied if address.port() != 0 => { - log::warn!( - "Unable to bind grafana data source server to {}. Trying random port.", - address - ); - address.set_port(0); - continue; - }, - _ => return Err(err.into()), - } - } - }; - - let service = make_service_fn(|_| { - async { - Ok::<_, Error>(service_fn(api_response)) - } - }); - - let server = Server::builder(Incoming(listener.incoming())) - .executor(Executor) - .serve(service) - .boxed(); - - let every = std::time::Duration::from_secs(24 * 3600); - let clean = clean_up(every, Duration::weeks(1)) - .boxed(); - - let result = match select(server, clean).await { - Either::Left((result, _)) => result.map_err(Into::into), - Either::Right((result, _)) => result - }; - - result -} - -#[cfg(target_os = "unknown")] -pub async fn run_server(_: std::net::SocketAddr) -> Result<(), Error> { - Ok(()) -} - -/// Periodically remove old metrics. 
-async fn clean_up(every: std::time::Duration, before: Duration) -> Result<(), Error> { - loop { - Delay::new(every).await; - - let oldest_allowed = (Utc::now() - before).timestamp_millis(); - DATABASE.write().truncate(oldest_allowed)?; - } -} diff --git a/utils/grafana-data-source/src/types.rs b/utils/grafana-data-source/src/types.rs deleted file mode 100644 index 960fc787e3eb2..0000000000000 --- a/utils/grafana-data-source/src/types.rs +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . 
- -use serde::{Serialize, Deserialize}; - -#[derive(Serialize, Deserialize)] -pub struct Target { - pub target: String, -} - -#[derive(Serialize, Deserialize)] -pub struct Query { - #[serde(rename = "maxDataPoints")] - pub max_datapoints: usize, - pub targets: Vec, - pub range: Range, -} - -#[derive(Serialize, Deserialize)] -pub struct Range { - #[serde(deserialize_with = "date_to_timestamp_ms")] - pub from: i64, - #[serde(deserialize_with = "date_to_timestamp_ms")] - pub to: i64, -} - -// Deserialize a timestamp via a `DateTime` -fn date_to_timestamp_ms<'de, D: serde::Deserializer<'de>>(timestamp: D) -> Result { - Deserialize::deserialize(timestamp) - .map(|date: chrono::DateTime| date.timestamp_millis()) -} - -#[derive(Serialize, Deserialize)] -pub struct TimeseriesData { - pub target: String, - pub datapoints: Vec<(f32, i64)> -} diff --git a/utils/grafana-data-source/test/src/main.rs b/utils/grafana-data-source/test/src/main.rs deleted file mode 100644 index 53deaffc3beb6..0000000000000 --- a/utils/grafana-data-source/test/src/main.rs +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . 
- -use grafana_data_source::{run_server, record_metrics}; -use std::time::Duration; -use rand::Rng; -use futures::{future::join, executor}; - -async fn randomness() { - loop { - futures_timer::Delay::new(Duration::from_secs(1)).await; - - let random = rand::thread_rng().gen_range(0.0, 1000.0); - - let result = record_metrics!( - "random data" => random, - "random^2" => random * random, - ); - - if let Err(error) = result { - eprintln!("{}", error); - } - } -} - -fn main() { - executor::block_on(join( - run_server("127.0.0.1:9955".parse().unwrap()), - randomness() - )).0.unwrap(); -} diff --git a/utils/prometheus/Cargo.toml b/utils/prometheus/Cargo.toml index a4bf0aa52273e..d335505d58cfe 100644 --- a/utils/prometheus/Cargo.toml +++ b/utils/prometheus/Cargo.toml @@ -1,7 +1,7 @@ [package] -description = "Prometheus endpoint server" -name = "prometheus-endpoint" -version = "2.0.0" +description = "Prometheus exporter server" +name = "prometheus-exporter" +version = "0.8.0" license = "GPL-3.0" authors = ["Parity Technologies "] edition = "2018" diff --git a/utils/prometheus/README.md b/utils/prometheus/README.md index 67199143fcdd6..c6746d573d602 100644 --- a/utils/prometheus/README.md +++ b/utils/prometheus/README.md @@ -1,5 +1,5 @@ # Substrate Prometheus Exporter -![grants](./photo_2019-12-13_16-32-53.jpg) + ## Introduction Prometheus is one of the most widely used monitoring tool for managing highly available services supported by [Cloud Native Computing Foundation](https://www.cncf.io/). By providing Prometheus metrics in Substrate, node operators can easily adopt widely used display/alert tools such as Grafana and Alertmanager without setting-up/operating external Prometheus push gateways (which is an antipattern in the first place) through RPC connections. Easy access to such monitoring tools will benefit parachain developers/operators and validators to have much higher availability of their services. 
@@ -14,12 +14,11 @@ Start Prometheus ## Metrics -substrate can report and serve the Prometheus metrics, which in their turn can be consumed by Prometheus collector(s). +Substrate can report and serve the Prometheus metrics, which in their turn can be consumed by Prometheus collector(s). This functionality is disabled by default. -To enable the Prometheus metrics, set in your cli command (--prometheus_external, --prometheus-port ). -Metrics will be served under /metrics on 9955 port by default. +Metrics will be served under /metrics on 9615 port by default. ## Start Prometheus @@ -32,7 +31,7 @@ Then edit `prometheus.yml` and add `jobs` : ```yaml - job_name: kusama static_configs: - - targets: ['localhost:9955'] + - targets: ['localhost:9615'] labels: instance: local-validator ``` diff --git a/utils/prometheus/photo_2019-12-13_16-32-53.jpg b/utils/prometheus/photo_2019-12-13_16-32-53.jpg deleted file mode 100644 index cdf44f88fb623..0000000000000 Binary files a/utils/prometheus/photo_2019-12-13_16-32-53.jpg and /dev/null differ diff --git a/utils/prometheus/src/lib.rs b/utils/prometheus/src/lib.rs index 7cb18b220028c..fc53d89ba0301 100644 --- a/utils/prometheus/src/lib.rs +++ b/utils/prometheus/src/lib.rs @@ -18,7 +18,7 @@ extern crate lazy_static; use futures_util::{FutureExt, future::Future}; use hyper::http::StatusCode; -use hyper::{Server, Body, Request, Response, service::{service_fn, make_service_fn}}; +use hyper::{Server, Body, Response, service::{service_fn, make_service_fn}}; use prometheus::{Encoder, Opts, TextEncoder, core::Atomic}; use std::net::SocketAddr; #[cfg(not(target_os = "unknown"))] @@ -27,12 +27,12 @@ mod networking; pub use prometheus::core::{ GenericGauge as Gauge, AtomicF64 as F64, AtomicI64 as I64, AtomicU64 as U64 }; +pub use prometheus::{Registry, Error as PrometheusError}; pub use lazy_static::lazy_static; pub fn create_gauge(name: &str, description: &str) -> Gauge { let opts = Opts::new(name, description); let gauge = 
Gauge::with_opts(opts).expect("Creating Gauge Failed"); - prometheus::register(Box::new(gauge.clone())).expect("Registering gauge failed"); gauge } @@ -43,20 +43,23 @@ pub enum Error { /// Http request error. Http(hyper::http::Error), /// i/o error. - Io(std::io::Error) + Io(std::io::Error), + #[display(fmt = "Prometheus exporter port {} already in use.", _0)] + PortInUse(SocketAddr) } impl std::error::Error for Error { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self { Error::Hyper(error) => Some(error), Error::Http(error) => Some(error), - Error::Io(error) => Some(error) + Error::Io(error) => Some(error), + Error::PortInUse(_) => None } } } -async fn request_metrics(req: Request) -> Result, Error> { - let metric_families = prometheus::gather(); +async fn request_metrics(registry: Registry) -> Result, Error> { + let metric_families = registry.gather(); let mut buffer = vec![]; let encoder = TextEncoder::new(); encoder.encode(&metric_families, &mut buffer).unwrap(); @@ -84,31 +87,21 @@ impl hyper::rt::Executor for Executor /// Initializes the metrics context, and starts an HTTP server /// to serve metrics. 
#[cfg(not(target_os = "unknown"))] -pub async fn init_prometheus(mut prometheus_addr: SocketAddr) -> Result<(), Error>{ - use async_std::{net, io}; +pub async fn init_prometheus(prometheus_addr: SocketAddr, registry: Registry) -> Result<(), Error>{ use networking::Incoming; - let listener = loop { - let listener = net::TcpListener::bind(&prometheus_addr).await; - match listener { - Ok(listener) => { - log::info!("Prometheus server started at {}", prometheus_addr); - break listener - }, - Err(err) => match err.kind() { - io::ErrorKind::AddrInUse | io::ErrorKind::PermissionDenied if prometheus_addr.port() != 0 => { - log::warn!( - "Prometheus metric port {} already in use.", prometheus_addr.port() - ); - prometheus_addr.set_port(0); - continue; - }, - _ => return Err(err.into()) - } - } - }; - let service = make_service_fn(|_| { - async { - Ok::<_, Error>(service_fn(request_metrics)) + let listener = async_std::net::TcpListener::bind(&prometheus_addr) + .await + .map_err(|_| Error::PortInUse(prometheus_addr))?; + + log::info!("Prometheus server started at {}", prometheus_addr); + + let service = make_service_fn(move |_| { + let registry = registry.clone(); + + async move { + Ok::<_, hyper::Error>(service_fn(move |_| { + request_metrics(registry.clone()) + })) } });