From a57bc4445a4e0bfd5c79c111add9d0db1a265507 Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Fri, 4 Jun 2021 08:50:59 +0200 Subject: [PATCH 01/61] Storage chain: Runtime module (#8624) * Transaction storage runtime module * WIP: Tests * Tests, benchmarks and docs * Made check_proof mandatory * Typo * Renamed a crate * Apply suggestions from code review Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Added weight for on_finalize * Fixed counter mutations * Reorganized tests * Fixed build * Update for the new inherent API * Reworked for the new inherents API * Apply suggestions from code review Co-authored-by: cheme Co-authored-by: Alexander Popiak Co-authored-by: Shawn Tabrizi * Store transactions in a Vec * Added FeeDestination * Get rid of constants * Fixed node runtime build * Fixed benches * Update frame/transaction-storage/src/lib.rs Co-authored-by: cheme Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: cheme Co-authored-by: Alexander Popiak Co-authored-by: Shawn Tabrizi --- Cargo.lock | 36 ++ Cargo.toml | 2 + bin/node/cli/src/chain_spec.rs | 1 + bin/node/runtime/Cargo.toml | 3 + bin/node/runtime/src/lib.rs | 10 + bin/node/testing/src/genesis.rs | 1 + client/api/src/client.rs | 10 + client/api/src/in_mem.rs | 7 + client/api/src/lib.rs | 1 + client/db/src/lib.rs | 51 +- client/light/src/blockchain.rs | 7 + client/service/Cargo.toml | 1 + client/service/src/client/client.rs | 30 ++ frame/benchmarking/src/lib.rs | 29 +- frame/transaction-storage/Cargo.toml | 50 ++ frame/transaction-storage/README.md | 8 + frame/transaction-storage/src/benchmarking.rs | 147 ++++++ frame/transaction-storage/src/lib.rs | 436 ++++++++++++++++++ frame/transaction-storage/src/mock.rs | 129 ++++++ frame/transaction-storage/src/tests.rs | 157 +++++++ frame/transaction-storage/src/weights.rs | 95 ++++ primitives/blockchain/src/backend.rs | 2 + primitives/externalities/src/lib.rs | 4 +- primitives/io/src/lib.rs | 
33 ++ primitives/state-machine/src/ext.rs | 17 +- .../src/overlayed_changes/mod.rs | 13 +- .../transaction-storage-proof/Cargo.toml | 36 ++ .../transaction-storage-proof/README.md | 3 + .../transaction-storage-proof/src/lib.rs | 240 ++++++++++ primitives/trie/src/storage_proof.rs | 4 + 30 files changed, 1534 insertions(+), 29 deletions(-) create mode 100644 frame/transaction-storage/Cargo.toml create mode 100644 frame/transaction-storage/README.md create mode 100644 frame/transaction-storage/src/benchmarking.rs create mode 100644 frame/transaction-storage/src/lib.rs create mode 100644 frame/transaction-storage/src/mock.rs create mode 100644 frame/transaction-storage/src/tests.rs create mode 100644 frame/transaction-storage/src/weights.rs create mode 100644 primitives/transaction-storage-proof/Cargo.toml create mode 100644 primitives/transaction-storage-proof/README.md create mode 100644 primitives/transaction-storage-proof/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index c8a7299835a06..97b64e07e4133 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4377,6 +4377,7 @@ dependencies = [ "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", + "pallet-transaction-storage", "pallet-treasury", "pallet-uniques", "pallet-utility", @@ -5602,6 +5603,26 @@ dependencies = [ "sp-runtime", ] +[[package]] +name = "pallet-transaction-storage" +version = "3.0.0" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-support-test", + "frame-system", + "hex-literal", + "pallet-balances", + "parity-scale-codec", + "serde", + "sp-core", + "sp-inherents", + "sp-io", + "sp-runtime", + "sp-std", + "sp-transaction-storage-proof", +] + [[package]] name = "pallet-treasury" version = "3.0.0" @@ -7953,6 +7974,7 @@ dependencies = [ "sp-state-machine", "sp-tracing", "sp-transaction-pool", + "sp-transaction-storage-proof", "sp-trie", "sp-utils", "sp-version", @@ -9314,6 +9336,20 @@ dependencies = [ "thiserror", ] +[[package]] +name = 
"sp-transaction-storage-proof" +version = "3.0.0" +dependencies = [ + "async-trait", + "log", + "parity-scale-codec", + "sp-core", + "sp-inherents", + "sp-runtime", + "sp-std", + "sp-trie", +] + [[package]] name = "sp-trie" version = "3.0.0" diff --git a/Cargo.toml b/Cargo.toml index 8b613c021a9fe..f7552f0bbbc48 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -123,6 +123,7 @@ members = [ "frame/transaction-payment", "frame/transaction-payment/rpc", "frame/transaction-payment/rpc/runtime-api", + "frame/transaction-storage", "frame/treasury", "frame/tips", "frame/uniques", @@ -180,6 +181,7 @@ members = [ "primitives/timestamp", "primitives/tracing", "primitives/transaction-pool", + "primitives/transaction-storage-proof", "primitives/trie", "primitives/utils", "primitives/version", diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs index eb3ee5124ac0a..3454aa83c24d4 100644 --- a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -335,6 +335,7 @@ pub fn testnet_genesis( }, pallet_vesting: Default::default(), pallet_gilt: Default::default(), + pallet_transaction_storage: Default::default(), } } diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index ca1ed7f3dcc09..e57944674fcc4 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -85,6 +85,7 @@ pallet-treasury = { version = "3.0.0", default-features = false, path = "../../. 
pallet-utility = { version = "3.0.0", default-features = false, path = "../../../frame/utility" } pallet-transaction-payment = { version = "3.0.0", default-features = false, path = "../../../frame/transaction-payment" } pallet-transaction-payment-rpc-runtime-api = { version = "3.0.0", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } +pallet-transaction-storage = { version = "3.0.0", default-features = false, path = "../../../frame/transaction-storage" } pallet-uniques = { version = "3.0.0", default-features = false, path = "../../../frame/uniques" } pallet-vesting = { version = "3.0.0", default-features = false, path = "../../../frame/vesting" } @@ -152,6 +153,7 @@ std = [ "pallet-tips/std", "pallet-transaction-payment-rpc-runtime-api/std", "pallet-transaction-payment/std", + "pallet-transaction-storage/std", "pallet-treasury/std", "sp-transaction-pool/std", "pallet-utility/std", @@ -194,6 +196,7 @@ runtime-benchmarks = [ "pallet-staking/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-tips/runtime-benchmarks", + "pallet-transaction-storage/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-uniques/runtime-benchmarks", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 3732adfb9a78d..97975c55e9601 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1114,6 +1114,14 @@ impl pallet_uniques::Config for Runtime { type WeightInfo = pallet_uniques::weights::SubstrateWeight; } +impl pallet_transaction_storage::Config for Runtime { + type Event = Event; + type Currency = Balances; + type Call = Call; + type FeeDestination = (); + type WeightInfo = pallet_transaction_storage::weights::SubstrateWeight; +} + construct_runtime!( pub enum Runtime where Block = Block, @@ -1159,6 +1167,7 @@ construct_runtime!( Lottery: pallet_lottery::{Pallet, Call, Storage, Event}, Gilt: pallet_gilt::{Pallet, Call, Storage, Event, 
Config}, Uniques: pallet_uniques::{Pallet, Call, Storage, Event}, + TransactionStorage: pallet_transaction_storage::{Pallet, Call, Storage, Inherent, Config, Event}, } ); @@ -1532,6 +1541,7 @@ impl_runtime_apis! { add_benchmark!(params, batches, frame_system, SystemBench::); add_benchmark!(params, batches, pallet_timestamp, Timestamp); add_benchmark!(params, batches, pallet_tips, Tips); + add_benchmark!(params, batches, pallet_transaction_storage, TransactionStorage); add_benchmark!(params, batches, pallet_treasury, Treasury); add_benchmark!(params, batches, pallet_uniques, Uniques); add_benchmark!(params, batches, pallet_utility, Utility); diff --git a/bin/node/testing/src/genesis.rs b/bin/node/testing/src/genesis.rs index 905c2f4d70bb2..6f884d1f73b62 100644 --- a/bin/node/testing/src/genesis.rs +++ b/bin/node/testing/src/genesis.rs @@ -120,5 +120,6 @@ pub fn config_endowed( }, pallet_vesting: Default::default(), pallet_gilt: Default::default(), + pallet_transaction_storage: Default::default(), } } diff --git a/client/api/src/client.rs b/client/api/src/client.rs index 4a0940b1f4bd3..79fb4f8844319 100644 --- a/client/api/src/client.rs +++ b/client/api/src/client.rs @@ -84,6 +84,16 @@ pub trait BlockBackend { id: &BlockId ) -> sp_blockchain::Result::Extrinsic>>>; + /// Get all indexed transactions for a block, + /// including renewed transactions. + /// + /// Note that this will only fetch transactions + /// that are indexed by the runtime with `storage_index_transaction`. + fn block_indexed_body( + &self, + id: &BlockId, + ) -> sp_blockchain::Result>>>; + /// Get full block by id. 
fn block(&self, id: &BlockId) -> sp_blockchain::Result>>; diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index d756e1cc0bbc4..0d40bb3354cc3 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -419,6 +419,13 @@ impl blockchain::Backend for Blockchain { ) -> sp_blockchain::Result>> { unimplemented!("Not supported by the in-mem backend.") } + + fn block_indexed_body( + &self, + _id: BlockId + ) -> sp_blockchain::Result>>> { + unimplemented!("Not supported by the in-mem backend.") + } } impl blockchain::ProvideCache for Blockchain { diff --git a/client/api/src/lib.rs b/client/api/src/lib.rs index 0f860b95e7805..f3cef0e36ff47 100644 --- a/client/api/src/lib.rs +++ b/client/api/src/lib.rs @@ -38,6 +38,7 @@ pub use client::*; pub use light::*; pub use notifications::*; pub use proof_provider::*; +pub use sp_blockchain::HeaderBackend; pub use sp_state_machine::{StorageProof, ExecutionStrategy}; diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 9a334f95d49a1..cda197ab0687a 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -67,7 +67,7 @@ use codec::{Decode, Encode}; use hash_db::Prefix; use sp_trie::{MemoryDB, PrefixedMemoryDB, prefixed_key}; use sp_database::Transaction; -use sp_core::{Hasher, ChangesTrieConfiguration}; +use sp_core::ChangesTrieConfiguration; use sp_core::offchain::OffchainOverlayedChange; use sp_core::storage::{well_known_keys, ChildInfo}; use sp_arithmetic::traits::Saturating; @@ -591,6 +591,37 @@ impl sc_client_api::blockchain::Backend for BlockchainDb ClientResult { Ok(self.db.contains(columns::TRANSACTION, hash.as_ref())) } + + fn block_indexed_body(&self, id: BlockId) -> ClientResult>>> { + match self.transaction_storage { + TransactionStorageMode::BlockBody => Ok(None), + TransactionStorageMode::StorageChain => { + let body = match read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY, id)? 
{ + Some(body) => body, + None => return Ok(None), + }; + match Vec::::decode(&mut &body[..]) { + Ok(index) => { + let mut transactions = Vec::new(); + for ExtrinsicHeader { indexed_hash, .. } in index.into_iter() { + if indexed_hash != Default::default() { + match self.db.get(columns::TRANSACTION, indexed_hash.as_ref()) { + Some(t) => transactions.push(t), + None => return Err(sp_blockchain::Error::Backend( + format!("Missing indexed transaction {:?}", indexed_hash)) + ) + } + } + } + Ok(Some(transactions)) + } + Err(err) => return Err(sp_blockchain::Error::Backend( + format!("Error decoding body list: {}", err) + )), + } + } + } + } } impl sc_client_api::blockchain::ProvideCache for BlockchainDb { @@ -1624,10 +1655,10 @@ fn apply_index_ops( let mut renewed_map = HashMap::new(); for op in ops { match op { - IndexOperation::Insert { extrinsic, offset } => { - index_map.insert(extrinsic, offset); + IndexOperation::Insert { extrinsic, hash, size } => { + index_map.insert(extrinsic, (hash, size)); } - IndexOperation::Renew { extrinsic, hash, .. } => { + IndexOperation::Renew { extrinsic, hash } => { renewed_map.insert(extrinsic, DbHash::from_slice(hash.as_ref())); } } @@ -1643,9 +1674,8 @@ fn apply_index_ops( } } else { match index_map.get(&(index as u32)) { - Some(offset) if *offset as usize <= extrinsic.len() => { - let offset = *offset as usize; - let hash = HashFor::::hash(&extrinsic[offset..]); + Some((hash, size)) if *size as usize <= extrinsic.len() => { + let offset = extrinsic.len() - *size as usize; transaction.store( columns::TRANSACTION, DbHash::from_slice(hash.as_ref()), @@ -3024,13 +3054,16 @@ pub(crate) mod tests { for i in 0 .. 
10 { let mut index = Vec::new(); if i == 0 { - index.push(IndexOperation::Insert { extrinsic: 0, offset: 1 }); + index.push(IndexOperation::Insert { + extrinsic: 0, + hash: x1_hash.as_ref().to_vec(), + size: (x1.len() - 1) as u32, + }); } else if i < 5 { // keep renewing 1st index.push(IndexOperation::Renew { extrinsic: 0, hash: x1_hash.as_ref().to_vec(), - size: (x1.len() - 1) as u32, }); } // else stop renewing let hash = insert_block( diff --git a/client/light/src/blockchain.rs b/client/light/src/blockchain.rs index 3349adf7ac693..242839833a541 100644 --- a/client/light/src/blockchain.rs +++ b/client/light/src/blockchain.rs @@ -135,6 +135,13 @@ impl BlockchainBackend for Blockchain where Block: BlockT, S ) -> ClientResult>> { Err(ClientError::NotAvailableOnLightClient) } + + fn block_indexed_body( + &self, + _id: BlockId + ) -> sp_blockchain::Result>>> { + Err(ClientError::NotAvailableOnLightClient) + } } impl, Block: BlockT> ProvideCache for Blockchain { diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index cff05390d7874..6a98cf82f3e55 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -65,6 +65,7 @@ codec = { package = "parity-scale-codec", version = "2.0.0" } sc-executor = { version = "0.9.0", path = "../executor" } sc-transaction-pool = { version = "3.0.0", path = "../transaction-pool" } sp-transaction-pool = { version = "3.0.0", path = "../../primitives/transaction-pool" } +sp-transaction-storage-proof = { version = "3.0.0", path = "../../primitives/transaction-storage-proof" } sc-rpc-server = { version = "3.0.0", path = "../rpc-servers" } sc-rpc = { version = "3.0.0", path = "../rpc" } sc-block-builder = { version = "0.9.0", path = "../block-builder" } diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index b294be2268997..06d9aec4e4fd3 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -1982,6 +1982,13 @@ impl BlockBackend for Client fn 
has_indexed_transaction(&self, hash: &Block::Hash) -> sp_blockchain::Result { self.backend.blockchain().has_indexed_transaction(hash) } + + fn block_indexed_body( + &self, + id: &BlockId + ) -> sp_blockchain::Result>>> { + self.backend.blockchain().block_indexed_body(*id) + } } impl backend::AuxStore for Client @@ -2050,3 +2057,26 @@ impl sp_consensus::block_validation::Chain for Client) } } + +impl sp_transaction_storage_proof::IndexedBody for Client +where + BE: backend::Backend, + E: CallExecutor, + B: BlockT, +{ + fn block_indexed_body( + &self, + number: NumberFor, + ) ->Result>>, sp_transaction_storage_proof::Error> { + self.backend.blockchain().block_indexed_body(BlockId::number(number)) + .map_err(|e| sp_transaction_storage_proof::Error::Application(Box::new(e))) + } + + fn number( + &self, + hash: B::Hash, + ) -> Result>, sp_transaction_storage_proof::Error> { + self.backend.blockchain().number(hash) + .map_err(|e| sp_transaction_storage_proof::Error::Application(Box::new(e))) + } +} diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index 63f65db366651..8160bd5d1dd21 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -736,17 +736,20 @@ macro_rules! impl_benchmark { SelectedBenchmark as $crate::BenchmarkingSetup >::components(&selected_benchmark); + let mut progress = $crate::benchmarking::current_time(); // Default number of steps for a component. let mut prev_steps = 10; - let repeat_benchmark = | + let mut repeat_benchmark = | repeat: u32, c: &[($crate::BenchmarkParameter, u32)], results: &mut $crate::Vec<$crate::BenchmarkResults>, verify: bool, + step: u32, + num_steps: u32, | -> Result<(), &'static str> { // Run the benchmark `repeat` times. - for _ in 0..repeat { + for r in 0..repeat { // Set up the externalities environment for the setup we want to // benchmark. let closure_to_benchmark = < @@ -801,6 +804,20 @@ macro_rules! 
impl_benchmark { "Read/Write Count {:?}", read_write_count ); + let time = $crate::benchmarking::current_time(); + if time.saturating_sub(progress) > 5000000000 { + progress = $crate::benchmarking::current_time(); + $crate::log::info!( + target: "benchmark", + "Benchmarking {} {}/{}, run {}/{}", + extrinsic, + step, + num_steps, + r, + repeat, + ); + } + // Time the storage root recalculation. let start_storage_root = $crate::benchmarking::current_time(); $crate::storage_root(); @@ -829,9 +846,9 @@ macro_rules! impl_benchmark { if components.is_empty() { if verify { // If `--verify` is used, run the benchmark once to verify it would complete. - repeat_benchmark(1, Default::default(), &mut $crate::Vec::new(), true)?; + repeat_benchmark(1, Default::default(), &mut $crate::Vec::new(), true, 1, 1)?; } - repeat_benchmark(repeat, Default::default(), &mut results, false)?; + repeat_benchmark(repeat, Default::default(), &mut results, false, 1, 1)?; } else { // Select the component we will be benchmarking. Each component will be benchmarked. for (idx, (name, low, high)) in components.iter().enumerate() { @@ -869,9 +886,9 @@ macro_rules! impl_benchmark { if verify { // If `--verify` is used, run the benchmark once to verify it would complete. 
- repeat_benchmark(1, &c, &mut $crate::Vec::new(), true)?; + repeat_benchmark(1, &c, &mut $crate::Vec::new(), true, s, num_of_steps)?; } - repeat_benchmark(repeat, &c, &mut results, false)?; + repeat_benchmark(repeat, &c, &mut results, false, s, num_of_steps)?; } } } diff --git a/frame/transaction-storage/Cargo.toml b/frame/transaction-storage/Cargo.toml new file mode 100644 index 0000000000000..8892e234d436f --- /dev/null +++ b/frame/transaction-storage/Cargo.toml @@ -0,0 +1,50 @@ +[package] +name = "pallet-transaction-storage" +version = "3.0.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "Unlicense" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Storage chain pallet" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +serde = { version = "1.0.101", optional = true } +hex-literal = { version = "0.3.1", optional = true } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } +pallet-balances = { version = "3.0.0", default-features = false, path = "../balances" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } +sp-inherents = { version = "3.0.0", default-features = false, path = "../../primitives/inherents" } +sp-transaction-storage-proof = { version = "3.0.0", default-features = false, path = "../../primitives/transaction-storage-proof" } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } + +[dev-dependencies] +frame-support-test = { version = 
"3.0.0", path = "../support/test" } +sp-transaction-storage-proof = { version = "3.0.0", default-features = true, path = "../../primitives/transaction-storage-proof" } +sp-core = { version = "3.0.0", path = "../../primitives/core", default-features = false } + +[features] +default = ["std"] +runtime-benchmarks = [ + "frame-benchmarking", + "hex-literal", +] +std = [ + "serde", + "codec/std", + "sp-runtime/std", + "frame-support/std", + "frame-system/std", + "pallet-balances/std", + "sp-io/std", + "sp-std/std", + "sp-inherents/std", +] diff --git a/frame/transaction-storage/README.md b/frame/transaction-storage/README.md new file mode 100644 index 0000000000000..a4f77797f5efd --- /dev/null +++ b/frame/transaction-storage/README.md @@ -0,0 +1,8 @@ +# Transaction Storage Pallet + +Indexes transactions and manages storage proofs. +# Transaction Storage Pallet + +Indexes transactions and manages storage proofs. + +License: Apache-2.0 diff --git a/frame/transaction-storage/src/benchmarking.rs b/frame/transaction-storage/src/benchmarking.rs new file mode 100644 index 0000000000000..ffb4d23de119f --- /dev/null +++ b/frame/transaction-storage/src/benchmarking.rs @@ -0,0 +1,147 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Benchmarks for transaction-storage Pallet + +#![cfg(feature = "runtime-benchmarks")] + +use sp_std::*; +use super::*; +use sp_runtime::traits::{Zero, One, Bounded}; +use sp_transaction_storage_proof::TransactionStorageProof; +use frame_system::{RawOrigin, Pallet as System, EventRecord}; +use frame_benchmarking::{benchmarks, whitelisted_caller, impl_benchmark_test_suite}; +use frame_support::{traits::{Currency, OnFinalize, OnInitialize}}; + +use crate::Pallet as TransactionStorage; + +const PROOF: &[u8] = &hex_literal::hex!(" + 0104000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 000000000000000000000000000000014cd0780ffff80e316a478e2f1fcb13cf22fd0b2dbb54a6f53cff93f3ca2f3dfe8 + 7d12a3662c4c0080e316a478e2f1fcb13cf22fd0b2dbb54a6f53cff93f3ca2f3dfe87d12a3662c4c80e316a478e2f1fcb + 13cf22fd0b2dbb54a6f53cff93f3ca2f3dfe87d12a3662c4c80e316a478e2f1fcb13cf22fd0b2dbb54a6f53cff93f3ca2 + f3dfe87d12a3662c4c80e316a478e2f1fcb13cf22fd0b2dbb54a6f53cff93f3ca2f3dfe87d12a3662c4c80e316a478e2f + 1fcb13cf22fd0b2dbb54a6f53cff93f3ca2f3dfe87d12a3662c4c80e316a478e2f1fcb13cf22fd0b2dbb54a6f53cff93f + 3ca2f3dfe87d12a3662c4c80e316a478e2f1fcb13cf22fd0b2dbb54a6f53cff93f3ca2f3dfe87d12a3662c4c80e316a47 + 8e2f1fcb13cf22fd0b2dbb54a6f53cff93f3ca2f3dfe87d12a3662c4c80e316a478e2f1fcb13cf22fd0b2dbb54a6f53cf + f93f3ca2f3dfe87d12a3662c4c80e316a478e2f1fcb13cf22fd0b2dbb54a6f53cff93f3ca2f3dfe87d12a3662c4c80e31 + 6a478e2f1fcb13cf22fd0b2dbb54a6f53cff93f3ca2f3dfe87d12a3662c4c80e316a478e2f1fcb13cf22fd0b2dbb54a6f + 
53cff93f3ca2f3dfe87d12a3662c4c80e316a478e2f1fcb13cf22fd0b2dbb54a6f53cff93f3ca2f3dfe87d12a3662c4c8 + 0e316a478e2f1fcb13cf22fd0b2dbb54a6f53cff93f3ca2f3dfe87d12a3662c4cbd05807777809a5d7a720ce5f9d9a012 + fbf25e92c30e732dadba8f312b05e02976313ea64d9f807d43bcbf8a3dc2f6b9e957d129e610c06d411e11743062dc1cf + 3ac289390ae4c8008592aa2d915f52941036afbe72bac4ebe7ce186c4ddc53f118e0ddd4decd8cc809a5d7a720ce5f9d9 + a012fbf25e92c30e732dadba8f312b05e02976313ea64d9f807d43bcbf8a3dc2f6b9e957d129e610c06d411e11743062d + c1cf3ac289390ae4c00809a5d7a720ce5f9d9a012fbf25e92c30e732dadba8f312b05e02976313ea64d9f807d43bcbf8a + 3dc2f6b9e957d129e610c06d411e11743062dc1cf3ac289390ae4c8008592aa2d915f52941036afbe72bac4ebe7ce186c + 4ddc53f118e0ddd4decd8cc809a5d7a720ce5f9d9a012fbf25e92c30e732dadba8f312b05e02976313ea64d9f807d43bc + bf8a3dc2f6b9e957d129e610c06d411e11743062dc1cf3ac289390ae4c8008592aa2d915f52941036afbe72bac4ebe7ce + 186c4ddc53f118e0ddd4decd8cccd0780ffff8081b825bfa9b2ba8f5f253515e7db09eb1ad3d4f02f322d3aa1398e0cb0 + 3bdb31008081b825bfa9b2ba8f5f253515e7db09eb1ad3d4f02f322d3aa1398e0cb03bdb318081b825bfa9b2ba8f5f253 + 515e7db09eb1ad3d4f02f322d3aa1398e0cb03bdb318081b825bfa9b2ba8f5f253515e7db09eb1ad3d4f02f322d3aa139 + 8e0cb03bdb318081b825bfa9b2ba8f5f253515e7db09eb1ad3d4f02f322d3aa1398e0cb03bdb318081b825bfa9b2ba8f5 + f253515e7db09eb1ad3d4f02f322d3aa1398e0cb03bdb318081b825bfa9b2ba8f5f253515e7db09eb1ad3d4f02f322d3a + a1398e0cb03bdb318081b825bfa9b2ba8f5f253515e7db09eb1ad3d4f02f322d3aa1398e0cb03bdb318081b825bfa9b2b + a8f5f253515e7db09eb1ad3d4f02f322d3aa1398e0cb03bdb318081b825bfa9b2ba8f5f253515e7db09eb1ad3d4f02f32 + 2d3aa1398e0cb03bdb318081b825bfa9b2ba8f5f253515e7db09eb1ad3d4f02f322d3aa1398e0cb03bdb318081b825bfa + 9b2ba8f5f253515e7db09eb1ad3d4f02f322d3aa1398e0cb03bdb318081b825bfa9b2ba8f5f253515e7db09eb1ad3d4f0 + 2f322d3aa1398e0cb03bdb318081b825bfa9b2ba8f5f253515e7db09eb1ad3d4f02f322d3aa1398e0cb03bdb318081b82 + 5bfa9b2ba8f5f253515e7db09eb1ad3d4f02f322d3aa1398e0cb03bdb31cd0780ffff80b4f23ac50c8e67d9b280f2b31a + 
5707d52b892977acaac84d530bd188544c5f9b80b4f23ac50c8e67d9b280f2b31a5707d52b892977acaac84d530bd1885 + 44c5f9b80b4f23ac50c8e67d9b280f2b31a5707d52b892977acaac84d530bd188544c5f9b80b4f23ac50c8e67d9b280f2 + b31a5707d52b892977acaac84d530bd188544c5f9b80b4f23ac50c8e67d9b280f2b31a5707d52b892977acaac84d530bd + 188544c5f9b0080b4f23ac50c8e67d9b280f2b31a5707d52b892977acaac84d530bd188544c5f9b80b4f23ac50c8e67d9 + b280f2b31a5707d52b892977acaac84d530bd188544c5f9b80b4f23ac50c8e67d9b280f2b31a5707d52b892977acaac84 + d530bd188544c5f9b80b4f23ac50c8e67d9b280f2b31a5707d52b892977acaac84d530bd188544c5f9b80b4f23ac50c8e + 67d9b280f2b31a5707d52b892977acaac84d530bd188544c5f9b80b4f23ac50c8e67d9b280f2b31a5707d52b892977aca + ac84d530bd188544c5f9b80b4f23ac50c8e67d9b280f2b31a5707d52b892977acaac84d530bd188544c5f9b80b4f23ac5 + 0c8e67d9b280f2b31a5707d52b892977acaac84d530bd188544c5f9b80b4f23ac50c8e67d9b280f2b31a5707d52b89297 + 7acaac84d530bd188544c5f9b80b4f23ac50c8e67d9b280f2b31a5707d52b892977acaac84d530bd188544c5f9b104401 + 0000 +"); + +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; + +fn assert_last_event(generic_event: ::Event) { + let events = System::::events(); + let system_event: ::Event = generic_event.into(); + let EventRecord { event, .. } = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + +pub fn run_to_block(n: T::BlockNumber) { + while frame_system::Pallet::::block_number() < n { + crate::Pallet::::on_finalize(frame_system::Pallet::::block_number()); + frame_system::Pallet::::on_finalize(frame_system::Pallet::::block_number()); + frame_system::Pallet::::set_block_number(frame_system::Pallet::::block_number() + One::one()); + frame_system::Pallet::::on_initialize(frame_system::Pallet::::block_number()); + crate::Pallet::::on_initialize(frame_system::Pallet::::block_number()); + } +} + +benchmarks! { + store { + let l in 1 .. 
MaxTransactionSize::::get(); + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + }: _(RawOrigin::Signed(caller.clone()), vec![0u8; l as usize]) + verify { + assert!(!BlockTransactions::::get().is_empty()); + assert_last_event::(Event::Stored(0).into()); + } + + renew { + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + TransactionStorage::::store( + RawOrigin::Signed(caller.clone()).into(), + vec![0u8; MaxTransactionSize::::get() as usize], + )?; + run_to_block::(1u32.into()); + }: _(RawOrigin::Signed(caller.clone()), T::BlockNumber::zero(), 0) + verify { + assert_last_event::(Event::Renewed(0).into()); + } + + check_proof_max { + run_to_block::(1u32.into()); + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + for _ in 0 .. MaxBlockTransactions::::get() { + TransactionStorage::::store( + RawOrigin::Signed(caller.clone()).into(), + vec![0u8; MaxTransactionSize::::get() as usize], + )?; + } + run_to_block::(StoragePeriod::::get() + T::BlockNumber::one()); + let random_hash = [0u8]; + let mut encoded_proof = PROOF; + let proof = TransactionStorageProof::decode(&mut encoded_proof).unwrap(); + }: check_proof(RawOrigin::None, proof) + verify { + assert_last_event::(Event::ProofChecked.into()); + } +} + +impl_benchmark_test_suite!( + TransactionStorage, + crate::mock::new_test_ext(), + crate::mock::Test, +); diff --git a/frame/transaction-storage/src/lib.rs b/frame/transaction-storage/src/lib.rs new file mode 100644 index 0000000000000..ef824a8399f57 --- /dev/null +++ b/frame/transaction-storage/src/lib.rs @@ -0,0 +1,436 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Transaction storage pallet. Indexes transactions and manages storage proofs. + +// Ensure we're `no_std` when compiling for Wasm. +#![cfg_attr(not(feature = "std"), no_std)] + +mod benchmarking; +pub mod weights; + +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +use frame_support::{ + traits::{ReservableCurrency, Currency, OnUnbalanced}, + dispatch::{Dispatchable, GetDispatchInfo}, +}; +use sp_std::prelude::*; +use sp_std::{result}; +use codec::{Encode, Decode}; +use sp_runtime::traits::{Saturating, BlakeTwo256, Hash, Zero, One}; +use sp_transaction_storage_proof::{ + TransactionStorageProof, InherentError, + random_chunk, encode_index, + CHUNK_SIZE, INHERENT_IDENTIFIER, DEFAULT_STORAGE_PERIOD, +}; + +/// A type alias for the balance type from this pallet's point of view. +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type NegativeImbalanceOf = <::Currency as Currency<::AccountId>> + ::NegativeImbalance; + +// Re-export pallet items so that they can be accessed from the crate namespace. +pub use pallet::*; +pub use weights::WeightInfo; + +/// Maximum bytes that can be stored in one transaction. +// Setting higher limit also requires raising the allocator limit. +pub const DEFAULT_MAX_TRANSACTION_SIZE: u32 = 8 * 1024 * 1024; +pub const DEFAULT_MAX_BLOCK_TRANSACTIONS: u32 = 512; + +/// State data for a stored transaction. 
+#[derive(Encode, Decode, Clone, sp_runtime::RuntimeDebug, PartialEq, Eq)] +pub struct TransactionInfo { + /// Chunk trie root. + chunk_root: ::Output, + /// Plain hash of indexed data. + content_hash: ::Output, + /// Size of indexed data in bytes. + size: u32, + /// Total number of chunks added in the block with this transaction. This + /// is used to find transaction info by block chunk index using binary search. + block_chunks: u32, +} + +fn num_chunks(bytes: u32) -> u32 { + ((bytes as u64 + CHUNK_SIZE as u64 - 1) / CHUNK_SIZE as u64) as u32 +} + +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From> + IsType<::Event>; + /// A dispatchable call. + type Call: Parameter + Dispatchable + GetDispatchInfo + From>; + /// The currency trait. + type Currency: ReservableCurrency; + /// Handler for the unbalanced decrease when fees are burned. + type FeeDestination: OnUnbalanced>; + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + } + + #[pallet::error] + pub enum Error { + /// Insufficient account balance. + InsufficientFunds, + /// Invalid configuration. + NotConfigured, + /// Renewed extrinsic is not found. + RenewedNotFound, + /// Attempting to store empty transaction + EmptyTransaction, + /// Proof was not expected in this block. + UnexpectedProof, + /// Proof failed verification. + InvalidProof, + /// Missing storage proof. + MissingProof, + /// Unable to verify proof because state data is missing. + MissingStateData, + /// Double proof check in the block. + DoubleCheck, + /// Storage proof was not checked in the block. + ProofNotChecked, + /// Transaction is too large. + TransactionTooLarge, + /// Too many transactions in the block. + TooManyTransactions, + /// Attempted to call `store` outside of block execution. 
+ BadContext, + } + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(n: T::BlockNumber) -> Weight { + // Drop obsolete roots. The proof for `obsolete` will be checked later + // in this block, so we drop `obsolete` - 1. + let period = >::get(); + let obsolete = n.saturating_sub(period.saturating_add(One::one())); + if obsolete > Zero::zero() { + >::remove(obsolete); + >::remove(obsolete); + } + // 2 writes in `on_initialize` and 2 writes + 2 reads in `on_finalize` + T::DbWeight::get().reads_writes(2, 4) + } + + fn on_finalize(n: T::BlockNumber) { + assert!( + >::take() + || { + // Proof is not required for early or empty blocks. + let number = >::block_number(); + let period = >::get(); + let target_number = number.saturating_sub(period); + target_number.is_zero() || >::get(target_number) == 0 + }, + "Storage proof must be checked once in the block" + ); + // Insert new transactions + let transactions = >::take(); + let total_chunks = transactions.last().map_or(0, |t| t.block_chunks); + if total_chunks != 0 { + >::insert(n, total_chunks); + >::insert(n, transactions); + } + } + } + + #[pallet::call] + impl Pallet { + /// Index and store data on chain. Minimum data size is 1 bytes, maximum is `MaxTransactionSize`. + /// Data will be removed after `STORAGE_PERIOD` blocks, unless `renew` is called. + /// # + /// - n*log(n) of data size, as all data is pushed to an in-memory trie. + /// Additionally contains a DB write. 
+ /// # + #[pallet::weight(T::WeightInfo::store(data.len() as u32))] + pub(super) fn store( + origin: OriginFor, + data: Vec, + ) -> DispatchResult { + ensure!(data.len() > 0, Error::::EmptyTransaction); + ensure!(data.len() <= MaxTransactionSize::::get() as usize, Error::::TransactionTooLarge); + let sender = ensure_signed(origin)?; + Self::apply_fee(sender, data.len() as u32)?; + + // Chunk data and compute storage root + let chunk_count = num_chunks(data.len() as u32); + let chunks = data.chunks(CHUNK_SIZE).map(|c| c.to_vec()).collect(); + let root = sp_io::trie::blake2_256_ordered_root(chunks); + + let content_hash = sp_io::hashing::blake2_256(&data); + let extrinsic_index = >::extrinsic_index().ok_or_else( + || Error::::BadContext)?; + sp_io::transaction_index::index(extrinsic_index, data.len() as u32, content_hash); + + let mut index = 0; + >::mutate(|transactions| { + if transactions.len() + 1 > MaxBlockTransactions::::get() as usize { + return Err(Error::::TooManyTransactions) + } + let total_chunks = transactions.last().map_or(0, |t| t.block_chunks) + chunk_count; + index = transactions.len() as u32; + transactions.push(TransactionInfo { + chunk_root: root, + size: data.len() as u32, + content_hash: content_hash.into(), + block_chunks: total_chunks, + }); + Ok(()) + })?; + Self::deposit_event(Event::Stored(index)); + Ok(()) + } + + /// Renew previously stored data. Parameters are the block number that contains + /// previous `store` or `renew` call and transaction index within that block. + /// Transaction index is emitted in the `Stored` or `Renewed` event. + /// Applies same fees as `store`. + /// # + /// - Constant. 
+ /// # + #[pallet::weight(T::WeightInfo::renew())] + pub(super) fn renew( + origin: OriginFor, + block: T::BlockNumber, + index: u32, + ) -> DispatchResultWithPostInfo { + let sender = ensure_signed(origin)?; + let transactions = >::get(block).ok_or(Error::::RenewedNotFound)?; + let info = transactions.get(index as usize).ok_or(Error::::RenewedNotFound)?; + Self::apply_fee(sender, info.size)?; + + let extrinsic_index = >::extrinsic_index().unwrap(); + sp_io::transaction_index::renew(extrinsic_index, info.content_hash.into()); + + let mut index = 0; + >::mutate(|transactions| { + if transactions.len() + 1 > MaxBlockTransactions::::get() as usize { + return Err(Error::::TooManyTransactions) + } + let chunks = num_chunks(info.size); + let total_chunks = transactions.last().map_or(0, |t| t.block_chunks) + chunks; + index = transactions.len() as u32; + transactions.push(TransactionInfo { + chunk_root: info.chunk_root, + size: info.size, + content_hash: info.content_hash, + block_chunks: total_chunks, + }); + Ok(()) + })?; + Self::deposit_event(Event::Renewed(index)); + Ok(().into()) + } + + /// Check storage proof for block number `block_number() - StoragePeriod`. + /// If such block does not exist the proof is expected to be `None`. + /// # + /// - Linear w.r.t the number of indexed transactions in the proved block for random probing. + /// There's a DB read for each transaction. + /// Here we assume a maximum of 100 probed transactions. 
+ /// # + #[pallet::weight((T::WeightInfo::check_proof_max(), DispatchClass::Mandatory))] + pub(super) fn check_proof( + origin: OriginFor, + proof: TransactionStorageProof, + ) -> DispatchResultWithPostInfo { + ensure_none(origin)?; + ensure!(!ProofChecked::::get(), Error::::DoubleCheck); + let number = >::block_number(); + let period = >::get(); + let target_number = number.saturating_sub(period); + ensure!(!target_number.is_zero(), Error::::UnexpectedProof); + let total_chunks = >::get(target_number); + ensure!(total_chunks != 0, Error::::UnexpectedProof); + let parent_hash = >::parent_hash(); + let selected_chunk_index = random_chunk(parent_hash.as_ref(), total_chunks); + let (info, chunk_index) = match >::get(target_number) { + Some(infos) => { + let index = match infos.binary_search_by_key(&selected_chunk_index, |info| info.block_chunks) { + Ok(index) => index, + Err(index) => index, + }; + let info = infos.get(index).ok_or_else(|| Error::::MissingStateData)?.clone(); + let chunks = num_chunks(info.size); + let prev_chunks = info.block_chunks - chunks; + (info, selected_chunk_index - prev_chunks) + }, + None => Err(Error::::MissingStateData)?, + }; + ensure!( + sp_io::trie::blake2_256_verify_proof( + info.chunk_root, + &proof.proof, + &encode_index(chunk_index), + &proof.chunk, + ), + Error::::InvalidProof + ); + ProofChecked::::put(true); + Self::deposit_event(Event::ProofChecked); + Ok(().into()) + } + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// Stored data under specified index. + Stored(u32), + /// Renewed data under specified index. + Renewed(u32), + /// Storage proof was successfully checked. + ProofChecked, + } + + /// Collection of transaction metadata by block number. + #[pallet::storage] + #[pallet::getter(fn transaction_roots)] + pub(super) type Transactions = StorageMap< + _, + Blake2_128Concat, + T::BlockNumber, + Vec, + OptionQuery, + >; + + /// Count indexed chunks for each block. 
+ #[pallet::storage] + pub(super) type ChunkCount = StorageMap< + _, + Blake2_128Concat, + T::BlockNumber, + u32, + ValueQuery, + >; + + #[pallet::storage] + #[pallet::getter(fn byte_fee)] + /// Storage fee per byte. + pub(super) type ByteFee = StorageValue<_, BalanceOf>; + + #[pallet::storage] + #[pallet::getter(fn entry_fee)] + /// Storage fee per transaction. + pub(super) type EntryFee = StorageValue<_, BalanceOf>; + + #[pallet::storage] + #[pallet::getter(fn max_transaction_size)] + /// Maximum data set in a single transaction in bytes. + pub(super) type MaxTransactionSize = StorageValue<_, u32, ValueQuery>; + + #[pallet::storage] + #[pallet::getter(fn max_block_transactions)] + /// Maximum number of indexed transactions in the block. + pub(super) type MaxBlockTransactions = StorageValue<_, u32, ValueQuery>; + + /// Storage period for data in blocks. Should match `sp_storage_proof::DEFAULT_STORAGE_PERIOD` + /// for block authoring. + #[pallet::storage] + pub(super) type StoragePeriod = StorageValue<_, T::BlockNumber, ValueQuery>; + + // Intermediates + #[pallet::storage] + pub(super) type BlockTransactions = StorageValue<_, Vec, ValueQuery>; + + /// Was the proof checked in this block? 
+ #[pallet::storage] + pub(super) type ProofChecked = StorageValue<_, bool, ValueQuery>; + + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub byte_fee: BalanceOf, + pub entry_fee: BalanceOf, + pub storage_period: T::BlockNumber, + pub max_block_transactions: u32, + pub max_transaction_size: u32, + } + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { + byte_fee: 10u32.into(), + entry_fee: 1000u32.into(), + storage_period: DEFAULT_STORAGE_PERIOD.into(), + max_block_transactions: DEFAULT_MAX_BLOCK_TRANSACTIONS, + max_transaction_size: DEFAULT_MAX_TRANSACTION_SIZE, + } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + >::put(&self.byte_fee); + >::put(&self.entry_fee); + >::put(&self.max_transaction_size); + >::put(&self.max_block_transactions); + >::put(&self.storage_period); + } + } + + #[pallet::inherent] + impl ProvideInherent for Pallet { + type Call = Call; + type Error = InherentError; + const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; + + fn create_inherent(data: &InherentData) -> Option { + let proof = data.get_data::(&Self::INHERENT_IDENTIFIER).unwrap_or(None); + proof.map(Call::check_proof) + } + + fn check_inherent(_call: &Self::Call, _data: &InherentData) -> result::Result<(), Self::Error> { + Ok(()) + } + + fn is_inherent(call: &Self::Call) -> bool { + matches!(call, Call::check_proof(_)) + } + } + + impl Pallet { + fn apply_fee(sender: T::AccountId, size: u32) -> DispatchResult { + let byte_fee = ByteFee::::get().ok_or(Error::::NotConfigured)?; + let entry_fee = EntryFee::::get().ok_or(Error::::NotConfigured)?; + let fee = byte_fee.saturating_mul(size.into()).saturating_add(entry_fee); + ensure!(T::Currency::can_slash(&sender, fee), Error::::InsufficientFunds); + let (credit, _) = T::Currency::slash(&sender, fee); + T::FeeDestination::on_unbalanced(credit); + Ok(()) + } + } +} diff --git a/frame/transaction-storage/src/mock.rs 
b/frame/transaction-storage/src/mock.rs new file mode 100644 index 0000000000000..51eb61dd26b78 --- /dev/null +++ b/frame/transaction-storage/src/mock.rs @@ -0,0 +1,129 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Test environment for transaction-storage pallet. + +use crate as pallet_transaction_storage; +use crate::TransactionStorageProof; +use sp_core::H256; +use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header, BuildStorage}; +use frame_support::{ + parameter_types, + traits::{OnInitialize, OnFinalize}, +}; + + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +pub type Block = frame_system::mocking::MockBlock; + +// Configure a mock runtime to test the pallet. +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Config, Storage, Event}, + TransactionStorage: pallet_transaction_storage::{ + Pallet, Call, Storage, Config, Inherent, Event + }, + } +); + +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const SS58Prefix: u8 = 42; +} + +impl frame_system::Config for Test { + type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type Origin = Origin; + type Call = Call; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type DbWeight = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = SS58Prefix; + type OnSetCode = (); +} + +parameter_types! { + pub const ExistentialDeposit: u64 = 1; +} + +impl pallet_balances::Config for Test { + type Balance = u64; + type DustRemoval = (); + type Event = Event; + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); + type MaxLocks = (); +} + +impl pallet_transaction_storage::Config for Test { + type Event = Event; + type Call = Call; + type Currency = Balances; + type FeeDestination = (); + type WeightInfo = (); +} + +pub fn new_test_ext() -> sp_io::TestExternalities { + let t = GenesisConfig { + frame_system: Default::default(), + pallet_balances: pallet_balances::GenesisConfig:: { + balances: vec![(1, 1000000000), (2, 100), (3, 100), (4, 100)] + }, + pallet_transaction_storage: pallet_transaction_storage::GenesisConfig:: { + storage_period: 10, + byte_fee: 2, + entry_fee: 200, + max_block_transactions: crate::DEFAULT_MAX_BLOCK_TRANSACTIONS, + max_transaction_size: crate::DEFAULT_MAX_TRANSACTION_SIZE, + }, + }.build_storage().unwrap(); + t.into() +} + +pub fn run_to_block(n: u64, f: impl Fn() -> Option) { + while System::block_number() < n { + if let Some(proof) = f() { + TransactionStorage::check_proof(Origin::none(), proof).unwrap(); + } + 
TransactionStorage::on_finalize(System::block_number()); + System::on_finalize(System::block_number()); + System::set_block_number(System::block_number() + 1); + System::on_initialize(System::block_number()); + TransactionStorage::on_initialize(System::block_number()); + } +} diff --git a/frame/transaction-storage/src/tests.rs b/frame/transaction-storage/src/tests.rs new file mode 100644 index 0000000000000..50594f1bce9dc --- /dev/null +++ b/frame/transaction-storage/src/tests.rs @@ -0,0 +1,157 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for transaction-storage pallet. 
+ +use super::*; +use crate::mock::*; +use super::Pallet as TransactionStorage; +use frame_support::{assert_ok, assert_noop}; +use frame_system::RawOrigin; +use sp_transaction_storage_proof::registration::build_proof; + +const MAX_DATA_SIZE: u32 = DEFAULT_MAX_TRANSACTION_SIZE; + +#[test] +fn discards_data() { + new_test_ext().execute_with(|| { + run_to_block(1, || None); + let caller = 1; + assert_ok!(TransactionStorage::::store( + RawOrigin::Signed(caller.clone()).into(), + vec![0u8; 2000 as usize] + )); + assert_ok!(TransactionStorage::::store( + RawOrigin::Signed(caller.clone()).into(), + vec![0u8; 2000 as usize] + )); + let proof_provider = || { + let block_num = >::block_number(); + if block_num == 11 { + let parent_hash = >::parent_hash(); + Some(build_proof(parent_hash.as_ref(), vec![vec![0u8; 2000], vec![0u8; 2000]]).unwrap()) + } else { + None + } + }; + run_to_block(11, proof_provider); + assert!(Transactions::::get(1).is_some()); + let transctions = Transactions::::get(1).unwrap(); + assert_eq!(transctions.len(), 2); + assert_eq!(ChunkCount::::get(1), 16); + run_to_block(12, proof_provider); + assert!(Transactions::::get(1).is_none()); + assert_eq!(ChunkCount::::get(1), 0); + }); +} + +#[test] +fn burns_fee() { + new_test_ext().execute_with(|| { + run_to_block(1, || None); + let caller = 1; + assert_noop!(TransactionStorage::::store( + RawOrigin::Signed(5).into(), + vec![0u8; 2000 as usize] + ), + Error::::InsufficientFunds, + ); + assert_ok!(TransactionStorage::::store( + RawOrigin::Signed(caller.clone()).into(), + vec![0u8; 2000 as usize] + )); + assert_eq!(Balances::free_balance(1), 1_000_000_000 - 2000 * 2 - 200); + }); +} + +#[test] +fn checks_proof() { + new_test_ext().execute_with(|| { + run_to_block(1, || None); + let caller = 1; + assert_ok!(TransactionStorage::::store( + RawOrigin::Signed(caller.clone()).into(), + vec![0u8; MAX_DATA_SIZE as usize] + )); + run_to_block(10, || None); + let parent_hash = >::parent_hash(); + let proof = 
build_proof( + parent_hash.as_ref(), + vec![vec![0u8; MAX_DATA_SIZE as usize]] + ).unwrap(); + assert_noop!(TransactionStorage::::check_proof( + Origin::none(), + proof, + ), + Error::::UnexpectedProof, + ); + run_to_block(11, || None); + let parent_hash = >::parent_hash(); + + let invalid_proof = build_proof( + parent_hash.as_ref(), + vec![vec![0u8; 1000]] + ).unwrap(); + assert_noop!(TransactionStorage::::check_proof( + Origin::none(), + invalid_proof, + ), + Error::::InvalidProof, + ); + + let proof = build_proof( + parent_hash.as_ref(), + vec![vec![0u8; MAX_DATA_SIZE as usize]] + ).unwrap(); + assert_ok!(TransactionStorage::::check_proof(Origin::none(), proof)); + }); +} + +#[test] +fn renews_data() { + new_test_ext().execute_with(|| { + run_to_block(1, || None); + let caller = 1; + assert_ok!(TransactionStorage::::store( + RawOrigin::Signed(caller.clone()).into(), + vec![0u8; 2000] + )); + let info = BlockTransactions::::get().last().unwrap().clone(); + run_to_block(6, || None); + assert_ok!(TransactionStorage::::renew( + RawOrigin::Signed(caller.clone()).into(), + 1, // block + 0, // transaction + )); + assert_eq!(Balances::free_balance(1), 1_000_000_000 - 4000 * 2 - 200 * 2); + let proof_provider = || { + let block_num = >::block_number(); + if block_num == 11 || block_num == 16 { + let parent_hash = >::parent_hash(); + Some(build_proof(parent_hash.as_ref(), vec![vec![0u8; 2000]]).unwrap()) + } else { + None + } + }; + run_to_block(16, proof_provider); + assert!(Transactions::::get(1).is_none()); + assert_eq!(Transactions::::get(6).unwrap().get(0), Some(info).as_ref()); + run_to_block(17, proof_provider); + assert!(Transactions::::get(6).is_none()); + }); +} + diff --git a/frame/transaction-storage/src/weights.rs b/frame/transaction-storage/src/weights.rs new file mode 100644 index 0000000000000..7951db8828d07 --- /dev/null +++ b/frame/transaction-storage/src/weights.rs @@ -0,0 +1,95 @@ +// This file is part of Substrate. 
+ +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_transaction_storage +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-06-03, STEPS: `[20, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// ./target/release/substrate +// benchmark +// --chain +// dev +// --steps +// 20 +// --repeat=20 +// --pallet=pallet_transaction_storage +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/transaction-storage/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_transaction_storage. +pub trait WeightInfo { + fn store(l: u32, ) -> Weight; + fn renew() -> Weight; + fn check_proof_max() -> Weight; +} + +/// Weights for pallet_transaction_storage using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn store(l: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 0 + .saturating_add((10_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn renew() -> Weight { + (97_000_000 as Weight) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn check_proof_max() -> Weight { + (99_000_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + fn store(l: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 0 + .saturating_add((10_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn renew() -> Weight { + (97_000_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn check_proof_max() -> Weight { + (99_000_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } +} diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index b00cbada9f476..3441a4f6cf544 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -227,6 +227,8 @@ pub trait Backend: HeaderBackend + HeaderMetadata Result { Ok(self.indexed_transaction(hash)?.is_some()) } + + fn block_indexed_body(&self, id: BlockId) -> Result>>>; } /// Provides access to the optional cache. 
diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index ce5a0990d738d..14145e8798498 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -229,12 +229,12 @@ pub trait Externalities: ExtensionStore { fn storage_commit_transaction(&mut self) -> Result<(), ()>; /// Index specified transaction slice and store it. - fn storage_index_transaction(&mut self, _index: u32, _offset: u32) { + fn storage_index_transaction(&mut self, _index: u32, _hash: &[u8], _size: u32) { unimplemented!("storage_index_transaction"); } /// Renew existing piece of transaction storage. - fn storage_renew_transaction_index(&mut self, _index: u32, _hash: &[u8], _size: u32) { + fn storage_renew_transaction_index(&mut self, _index: u32, _hash: &[u8]) { unimplemented!("storage_renew_transaction_index"); } diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 72695f2156b67..f0fcc4f1b0672 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -429,6 +429,24 @@ pub trait Trie { fn keccak_256_ordered_root(input: Vec>) -> H256 { Layout::::ordered_trie_root(input) } + + /// Verify trie proof + fn blake2_256_verify_proof(root: H256, proof: &[Vec], key: &[u8], value: &[u8]) -> bool { + sp_trie::verify_trie_proof::, _, _, _>( + &root, + proof, + &[(key, Some(value))], + ).is_ok() + } + + /// Verify trie proof + fn keccak_256_verify_proof(root: H256, proof: &[Vec], key: &[u8], value: &[u8]) -> bool { + sp_trie::verify_trie_proof::, _, _, _>( + &root, + proof, + &[(key, Some(value))], + ).is_ok() + } } /// Interface that provides miscellaneous functions for communicating between the runtime and the node. @@ -824,6 +842,20 @@ pub trait Hashing { } } +/// Interface that provides transaction indexing API. +#[runtime_interface] +pub trait TransactionIndex { + /// Add transaction index. Returns indexed content hash. 
+ fn index(&mut self, extrinsic: u32, size: u32, context_hash: [u8; 32]) { + self.storage_index_transaction(extrinsic, &context_hash, size); + } + + /// Renew existing transaction index for the given content hash. + fn renew(&mut self, extrinsic: u32, context_hash: [u8; 32]) { + self.storage_renew_transaction_index(extrinsic, &context_hash); + } +} + /// Interface that provides functions to access the Offchain DB. #[runtime_interface] pub trait OffchainIndex { @@ -1434,6 +1466,7 @@ pub type SubstrateHostFunctions = ( crate::trie::HostFunctions, offchain_index::HostFunctions, runtime_tasks::HostFunctions, + transaction_index::HostFunctions, ); #[cfg(test)] diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 2649c320e14dc..8bcf1f28a0778 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -629,33 +629,34 @@ where } } - fn storage_index_transaction(&mut self, index: u32, offset: u32) { + fn storage_index_transaction(&mut self, index: u32, hash: &[u8], size: u32) { trace!( target: "state", - "{:04x}: IndexTransaction ({}): [{}..]", + "{:04x}: IndexTransaction ({}): {}, {} bytes", self.id, index, - offset, + HexDisplay::from(&hash), + size, ); self.overlay.add_transaction_index(IndexOperation::Insert { extrinsic: index, - offset, + hash: hash.to_vec(), + size, }); } /// Renew existing piece of data storage. 
- fn storage_renew_transaction_index(&mut self, index: u32, hash: &[u8], size: u32) { + fn storage_renew_transaction_index(&mut self, index: u32, hash: &[u8]) { trace!( target: "state", - "{:04x}: RenewTransactionIndex ({}) {} bytes", + "{:04x}: RenewTransactionIndex ({}): {}", self.id, + index, HexDisplay::from(&hash), - size, ); self.overlay.add_transaction_index(IndexOperation::Renew { extrinsic: index, hash: hash.to_vec(), - size }); } diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index 2a3495a4e1c74..c01d56ab919a0 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -118,8 +118,10 @@ pub enum IndexOperation { Insert { /// Extrinsic index in the current block. extrinsic: u32, - /// Data offset in the extrinsic. - offset: u32, + /// Data content hash. + hash: Vec, + /// Indexed data size. + size: u32, }, /// Renew existing transaction storage. Renew { @@ -127,8 +129,6 @@ pub enum IndexOperation { extrinsic: u32, /// Referenced index hash. hash: Vec, - /// Expected data size. - size: u32, } } @@ -520,6 +520,11 @@ impl OverlayedChanges { self.children.get(key).map(|(overlay, info)| (overlay.changes(), info)) } + /// Get an list of all index operations. + pub fn transaction_index_ops(&self) -> &[IndexOperation] { + &self.transaction_index_ops + } + /// Convert this instance with all changes into a [`StorageChanges`] instance. 
#[cfg(feature = "std")] pub fn into_storage_changes< diff --git a/primitives/transaction-storage-proof/Cargo.toml b/primitives/transaction-storage-proof/Cargo.toml new file mode 100644 index 0000000000000..bbdcb9f989f0b --- /dev/null +++ b/primitives/transaction-storage-proof/Cargo.toml @@ -0,0 +1,36 @@ +[package] +name = "sp-transaction-storage-proof" +version = "3.0.0" +authors = ["Parity Technologies "] +description = "Transaction storage proof primitives" +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +sp-inherents = { version = "3.0.0", default-features = false, path = "../inherents" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../runtime" } +sp-std = { version = "3.0.0", default-features = false, path = "../std" } +sp-trie = { version = "3.0.0", optional = true, path = "../trie" } +sp-core = { version = "3.0.0", path = "../core", optional = true } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +log = { version = "0.4.8", optional = true } +async-trait = { version = "0.1.48", optional = true } + +[features] +default = [ "std" ] +std = [ + "codec/std", + "sp-std/std", + "sp-inherents/std", + "sp-runtime/std", + "sp-trie/std", + "sp-core", + "log", + "async-trait", +] diff --git a/primitives/transaction-storage-proof/README.md b/primitives/transaction-storage-proof/README.md new file mode 100644 index 0000000000000..1aa1805cfc5e7 --- /dev/null +++ b/primitives/transaction-storage-proof/README.md @@ -0,0 +1,3 @@ +Transaction Storage Proof Primitives + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/transaction-storage-proof/src/lib.rs b/primitives/transaction-storage-proof/src/lib.rs new file mode 100644 index 0000000000000..825de27b2a5a9 --- /dev/null +++ 
b/primitives/transaction-storage-proof/src/lib.rs @@ -0,0 +1,240 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Storage proof primitives. Contains types and basic code to extract storage +//! proofs for indexed transactions. + +#![cfg_attr(not(feature = "std"), no_std)] + +use sp_std::{result::Result, prelude::*}; + +use codec::{Encode, Decode}; +use sp_inherents::{InherentIdentifier, InherentData, IsFatalError}; +use sp_runtime::{traits::{Block as BlockT, NumberFor}}; + +pub use sp_inherents::Error; + +/// The identifier for the proof inherent. +pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"tx_proof"; +/// Storage period for data. +pub const DEFAULT_STORAGE_PERIOD: u32 = 100800; +/// Proof trie value size. +pub const CHUNK_SIZE: usize = 256; + +/// Errors that can occur while checking the storage proof. +#[derive(Encode, sp_runtime::RuntimeDebug)] +#[cfg_attr(feature = "std", derive(Decode))] +pub enum InherentError { + InvalidProof, + TrieError +} + +impl IsFatalError for InherentError { + fn is_fatal_error(&self) -> bool { + true + } +} + +#[derive(Encode, Decode, Clone, PartialEq, Debug)] +pub struct TransactionStorageProof { + /// Data chunk that is proved to exist. + pub chunk: Vec, + /// Trie nodes that compose the proof. + pub proof: Vec>, +} + +/// Auxiliary trait to extract storage proof. 
+pub trait TransactionStorageProofInherentData { + /// Get the proof. + fn storage_proof(&self) -> Result, Error>; +} + +impl TransactionStorageProofInherentData for InherentData { + fn storage_proof(&self) -> Result, Error> { + Ok(self.get_data(&INHERENT_IDENTIFIER)?) + } +} + +/// Provider for inherent data. +#[cfg(feature = "std")] +pub struct InherentDataProvider { + proof: Option, +} + +#[cfg(feature = "std")] +impl InherentDataProvider { + pub fn new(proof: Option) -> Self { + InherentDataProvider { proof } + } +} + +#[cfg(feature = "std")] +#[async_trait::async_trait] +impl sp_inherents::InherentDataProvider for InherentDataProvider { + fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { + if let Some(proof) = &self.proof { + inherent_data.put_data(INHERENT_IDENTIFIER, proof) + } else { + Ok(()) + } + } + + async fn try_handle_error( + &self, + identifier: &InherentIdentifier, + error: &[u8], + ) -> Option> { + if *identifier != INHERENT_IDENTIFIER { + return None + } + + let error = InherentError::decode(&mut &error[..]).ok()?; + + Some(Err(Error::Application(Box::from(format!("{:?}", error))))) + } +} + +/// A utility function to extract chunk index from the source of randomness. +pub fn random_chunk(random_hash: &[u8], total_chunks: u32) -> u32 { + let mut buf = [0u8; 8]; + buf.copy_from_slice(&random_hash[0..8]); + let random_u64 = u64::from_be_bytes(buf); + (random_u64 % total_chunks as u64) as u32 +} + +/// A utility function to encode transaction index as trie key. +pub fn encode_index(input: u32) -> Vec { + codec::Encode::encode(&codec::Compact(input)) +} + +/// An interface to request indexed data from the client. 
+pub trait IndexedBody { + fn block_indexed_body( + &self, + number: NumberFor, + ) -> Result>>, Error>; + + fn number( + &self, + hash: B::Hash, + ) -> Result>, Error>; +} + +#[cfg(feature = "std")] +pub mod registration { + use sp_runtime::{traits::{Block as BlockT, Saturating, Zero, One}}; + use sp_trie::TrieMut; + use super::*; + + type Hasher = sp_core::Blake2Hasher; + type TrieLayout = sp_trie::Layout::; + + /// Create a new inherent data provider instance for a given parent block hash. + pub fn new_data_provider( + client: &C, + parent: &B::Hash, + ) -> Result + where + B: BlockT, + C: IndexedBody, + { + let parent_number = client.number(parent.clone())?.unwrap_or(Zero::zero()); + let number = parent_number + .saturating_add(One::one()) + .saturating_sub(DEFAULT_STORAGE_PERIOD.into()); + if number.is_zero() { + // Too early to collect proofs. + return Ok(InherentDataProvider::new(None)); + } + + let proof = match client.block_indexed_body(number)? { + Some(transactions) => { + Some(build_proof(parent.as_ref(), transactions)?) + }, + None => { + // Nothing was indexed in that block. + None + } + }; + Ok(InherentDataProvider::new(proof)) + } + + /// Build a proof for a given source of randomness and indexed transactions. + pub fn build_proof(random_hash: &[u8], transactions: Vec>) + -> Result + { + let mut db = sp_trie::MemoryDB::::default(); + + let mut target_chunk = None; + let mut target_root = Default::default(); + let mut target_chunk_key = Default::default(); + let mut chunk_proof = Default::default(); + + let total_chunks: u64 = transactions.iter().map(|t| ((t.len() + CHUNK_SIZE - 1) / CHUNK_SIZE) as u64).sum(); + let mut buf = [0u8; 8]; + buf.copy_from_slice(&random_hash[0..8]); + let random_u64 = u64::from_be_bytes(buf); + let target_chunk_index = random_u64 % total_chunks; + //Generate tries for each transaction. 
+ let mut chunk_index = 0; + for transaction in transactions { + let mut transaction_root = sp_trie::empty_trie_root::(); + { + let mut trie = sp_trie::TrieDBMut::::new(&mut db, &mut transaction_root); + let chunks = transaction.chunks(CHUNK_SIZE).map(|c| c.to_vec()); + for (index, chunk) in chunks.enumerate() { + let index = encode_index(index as u32); + trie.insert(&index, &chunk) + .map_err(|e| Error::Application(Box::new(e)))?; + if chunk_index == target_chunk_index { + target_chunk = Some(chunk); + target_chunk_key = index; + } + chunk_index += 1; + } + trie.commit(); + } + if target_chunk.is_some() && target_root == Default::default() { + target_root = transaction_root.clone(); + chunk_proof = sp_trie::generate_trie_proof::( + &db, + transaction_root.clone(), + &[target_chunk_key.clone()] + ).map_err(|e| Error::Application(Box::new(e)))?; + } + }; + + Ok(TransactionStorageProof { + proof: chunk_proof, + chunk: target_chunk.unwrap(), + }) + } + + #[test] + fn build_proof_check() { + use std::str::FromStr; + let random = [0u8; 32]; + let proof = build_proof(&random, vec![vec![42]]).unwrap(); + let root = sp_core::H256::from_str("0xff8611a4d212fc161dae19dd57f0f1ba9309f45d6207da13f2d3eab4c6839e91").unwrap(); + sp_trie::verify_trie_proof::( + &root, + &proof.proof, + &[(encode_index(0), Some(proof.chunk))], + ).unwrap(); + } +} + diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index f0b2bfd4bc3d3..d8394a89de526 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -58,6 +58,10 @@ impl StorageProof { StorageProofNodeIterator::new(self) } + /// Convert into plain node vector. + pub fn into_nodes(self) -> Vec> { + self.trie_nodes + } /// Creates a `MemoryDB` from `Self`. 
pub fn into_memory_db(self) -> crate::MemoryDB { self.into() From 7d8a9b6d9862f208e34a7f715448b21250b653e2 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Fri, 4 Jun 2021 09:05:21 +0200 Subject: [PATCH 02/61] more useful error message (#9014) --- client/finality-grandpa/src/import.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index 482859b1f79ef..474f6ee5bf7e5 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -646,9 +646,10 @@ where initial_sync: bool, ) -> Result<(), ConsensusError> { if justification.0 != GRANDPA_ENGINE_ID { - return Err(ConsensusError::ClientImport( - "GRANDPA can only import GRANDPA Justifications.".into(), - )); + return Err(ConsensusError::ClientImport(format!( + "Expected GRANDPA Justification, got {}.", + String::from_utf8_lossy(&justification.0) + ))); } let justification = GrandpaJustification::decode_and_verify_finalizes( From 0495ead464cc632237d0f9d1687b1ebbb579c7a7 Mon Sep 17 00:00:00 2001 From: Xiliang Chen Date: Fri, 4 Jun 2021 19:32:46 +1200 Subject: [PATCH 03/61] Named reserve (#7778) * add NamedReservableCurrency * move currency related trait and types into a new file * implement NamedReservableCurrency * remove empty reserves * Update frame/support/src/traits.rs Co-authored-by: Shawn Tabrizi * fix build * bump year * add MaxReserves * repatriate_reserved_named should put reserved fund into named reserved * add tests * add some docs * fix warning * Update lib.rs * fix test * fix test * fix * fix * triggier CI * Move NamedReservableCurrency. * Use strongly bounded vec for reserves. * Fix test. 
* remove duplicated file * trigger CI * Make `ReserveIdentifier` assosicated type * add helpers * make ReserveIdentifier assosicated type * fix * update * trigger CI * Apply suggestions from code review Co-authored-by: Shawn Tabrizi * trigger CI * Apply suggestions from code review Co-authored-by: Shawn Tabrizi Co-authored-by: Gavin Wood Co-authored-by: Shaun Wang --- bin/node-template/runtime/src/lib.rs | 2 + bin/node/runtime/src/lib.rs | 3 + frame/assets/src/mock.rs | 2 + frame/atomic-swap/src/tests.rs | 2 + frame/babe/src/mock.rs | 2 + frame/balances/src/lib.rs | 225 +++++++++++++++++- frame/balances/src/tests.rs | 118 +++++++++ frame/balances/src/tests_composite.rs | 6 + frame/balances/src/tests_local.rs | 3 + frame/balances/src/tests_reentrancy.rs | 3 + frame/bounties/src/tests.rs | 2 + frame/contracts/src/tests.rs | 2 + frame/democracy/src/tests.rs | 2 + .../election-provider-multi-phase/src/mock.rs | 2 + frame/elections-phragmen/src/lib.rs | 2 + frame/elections/src/mock.rs | 2 + frame/example/src/tests.rs | 2 + frame/executive/src/lib.rs | 2 + frame/gilt/src/mock.rs | 2 + frame/grandpa/src/mock.rs | 2 + frame/identity/src/tests.rs | 2 + frame/indices/src/mock.rs | 2 + frame/lottery/src/mock.rs | 2 + frame/multisig/src/tests.rs | 2 + frame/nicks/src/lib.rs | 2 + frame/offences/benchmarking/src/mock.rs | 2 + frame/proxy/src/tests.rs | 2 + frame/recovery/src/mock.rs | 2 + frame/scored-pool/src/mock.rs | 2 + frame/session/benchmarking/src/mock.rs | 2 + frame/society/src/mock.rs | 2 + frame/staking/fuzzer/src/mock.rs | 2 + frame/staking/src/mock.rs | 2 + frame/support/src/traits.rs | 3 +- frame/support/src/traits/tokens/currency.rs | 2 +- .../src/traits/tokens/currency/reservable.rs | 113 +++++++++ frame/tips/src/tests.rs | 2 + frame/transaction-payment/src/lib.rs | 2 + frame/treasury/src/tests.rs | 2 + frame/uniques/src/mock.rs | 3 + frame/utility/src/tests.rs | 2 + frame/vesting/src/lib.rs | 2 + 42 files changed, 538 insertions(+), 5 deletions(-) diff --git 
a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index b928f8d3410ef..e51a190ae9a0d 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -244,6 +244,8 @@ parameter_types! { impl pallet_balances::Config for Runtime { type MaxLocks = MaxLocks; + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; /// The type for recording an account's balance. type Balance = Balance; /// The ubiquitous event type. diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 97975c55e9601..6c38bf41ec591 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -375,10 +375,13 @@ parameter_types! { // For weight estimation, we assume that the most locks on an individual account will be 50. // This number may need to be adjusted in the future if this assumption no longer holds true. pub const MaxLocks: u32 = 50; + pub const MaxReserves: u32 = 50; } impl pallet_balances::Config for Runtime { type MaxLocks = MaxLocks; + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; type Balance = Balance; type DustRemoval = (); type Event = Event; diff --git a/frame/assets/src/mock.rs b/frame/assets/src/mock.rs index 0b7aa339835ec..cf99eed703cdf 100644 --- a/frame/assets/src/mock.rs +++ b/frame/assets/src/mock.rs @@ -80,6 +80,8 @@ impl pallet_balances::Config for Test { type AccountStore = System; type WeightInfo = (); type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; } parameter_types! { diff --git a/frame/atomic-swap/src/tests.rs b/frame/atomic-swap/src/tests.rs index cc2849f5bd2c0..f41874a1eec48 100644 --- a/frame/atomic-swap/src/tests.rs +++ b/frame/atomic-swap/src/tests.rs @@ -60,6 +60,8 @@ parameter_types! 
{ } impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u64; type DustRemoval = (); type Event = Event; diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index 40ee782e721d6..bd99531542471 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -155,6 +155,8 @@ parameter_types! { impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u128; type DustRemoval = (); type Event = Event; diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index 04dacc7858646..23c5cc97d0937 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -73,6 +73,7 @@ //! - [`Currency`](frame_support::traits::Currency): Functions for dealing with a //! fungible assets system. //! - [`ReservableCurrency`](frame_support::traits::ReservableCurrency): +//! - [`NamedReservableCurrency`](frame_support::traits::NamedReservableCurrency): //! Functions for dealing with assets that can be reserved from an account. //! - [`LockableCurrency`](frame_support::traits::LockableCurrency): Functions for //! dealing with accounts that allow liquidity restrictions. @@ -163,9 +164,9 @@ use frame_support::{ traits::{ Currency, OnUnbalanced, TryDrop, StoredMap, MaxEncodedLen, WithdrawReasons, LockIdentifier, LockableCurrency, ExistenceRequirement, - Imbalance, SignedImbalance, ReservableCurrency, Get, ExistenceRequirement::KeepAlive, - ExistenceRequirement::AllowDeath, - tokens::{fungible, DepositConsequence, WithdrawConsequence, BalanceStatus as Status} + Imbalance, SignedImbalance, ReservableCurrency, Get, ExistenceRequirement::{AllowDeath, KeepAlive}, + NamedReservableCurrency, + tokens::{fungible, DepositConsequence, WithdrawConsequence, BalanceStatus as Status}, } }; #[cfg(feature = "std")] @@ -214,6 +215,12 @@ pub mod pallet { /// The maximum number of locks that should exist on an account. 
/// Not strictly enforced, but used for weight estimation. type MaxLocks: Get; + + /// The maximum number of named reserves that can exist on an account. + type MaxReserves: Get; + + /// The id type for named reserves. + type ReserveIdentifier: Parameter + Member + MaxEncodedLen + Ord + Copy; } #[pallet::pallet] @@ -409,6 +416,8 @@ pub mod pallet { ExistingVestingSchedule, /// Beneficiary account must pre-exist DeadAccount, + /// Number of named reserves exceed MaxReserves + TooManyReserves, } /// The total units issued in the system. @@ -444,6 +453,17 @@ pub mod pallet { ConstU32<300_000>, >; + /// Named reserves on some account balances. + #[pallet::storage] + #[pallet::getter(fn reserves)] + pub type Reserves, I: 'static = ()> = StorageMap< + _, + Blake2_128Concat, + T::AccountId, + BoundedVec, T::MaxReserves>, + ValueQuery + >; + /// Storage version of the pallet. /// /// This is set to v2.0.0 for new networks. @@ -560,6 +580,15 @@ pub struct BalanceLock { pub reasons: Reasons, } +/// Store named reserved balance. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, MaxEncodedLen)] +pub struct ReserveData { + /// The identifier for the named reserve. + pub id: ReserveIdentifier, + /// The amount of the named reserve. + pub amount: Balance, +} + /// All balance information for an account. #[derive(Encode, Decode, Clone, PartialEq, Eq, Default, RuntimeDebug, MaxEncodedLen)] pub struct AccountData { @@ -575,6 +604,7 @@ pub struct AccountData { /// /// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens /// that are still 'owned' by the account holder, but which are suspendable. + /// This includes named reserve and unnamed reserve. pub reserved: Balance, /// The amount that `free` may not drop below when withdrawing for *anything except transaction /// fee payment*. 
@@ -1648,6 +1678,195 @@ impl, I: 'static> ReservableCurrency for Pallet } } +impl, I: 'static> NamedReservableCurrency for Pallet where + T::Balance: MaybeSerializeDeserialize + Debug +{ + type ReserveIdentifier = T::ReserveIdentifier; + + fn reserved_balance_named(id: &Self::ReserveIdentifier, who: &T::AccountId) -> Self::Balance { + let reserves = Self::reserves(who); + reserves + .binary_search_by_key(id, |data| data.id) + .map(|index| reserves[index].amount) + .unwrap_or_default() + } + + /// Move `value` from the free balance from `who` to a named reserve balance. + /// + /// Is a no-op if value to be reserved is zero. + fn reserve_named(id: &Self::ReserveIdentifier, who: &T::AccountId, value: Self::Balance) -> DispatchResult { + if value.is_zero() { return Ok(()) } + + Reserves::::try_mutate(who, |reserves| -> DispatchResult { + match reserves.binary_search_by_key(id, |data| data.id) { + Ok(index) => { + // this add can't overflow but just to be defensive. + reserves[index].amount = reserves[index].amount.saturating_add(value); + }, + Err(index) => { + reserves.try_insert(index, ReserveData { + id: id.clone(), + amount: value + }).map_err(|_| Error::::TooManyReserves)?; + }, + }; + >::reserve(who, value)?; + Ok(()) + }) + } + + /// Unreserve some funds, returning any amount that was unable to be unreserved. + /// + /// Is a no-op if the value to be unreserved is zero. 
+ fn unreserve_named(id: &Self::ReserveIdentifier, who: &T::AccountId, value: Self::Balance) -> Self::Balance { + if value.is_zero() { return Zero::zero() } + + Reserves::::mutate_exists(who, |maybe_reserves| -> Self::Balance { + if let Some(reserves) = maybe_reserves.as_mut() { + match reserves.binary_search_by_key(id, |data| data.id) { + Ok(index) => { + let to_change = cmp::min(reserves[index].amount, value); + + let remain = >::unreserve(who, to_change); + + // remain should always be zero but just to be defensive here + let actual = to_change.saturating_sub(remain); + + // `actual <= to_change` and `to_change <= amount`; qed; + reserves[index].amount -= actual; + + if reserves[index].amount.is_zero() { + if reserves.len() == 1 { + // no more named reserves + *maybe_reserves = None; + } else { + // remove this named reserve + reserves.remove(index); + } + } + + value - actual + }, + Err(_) => { + value + }, + } + } else { + value + } + }) + } + + /// Slash from reserved balance, returning the negative imbalance created, + /// and any amount that was unable to be slashed. + /// + /// Is a no-op if the value to be slashed is zero. 
+ fn slash_reserved_named( + id: &Self::ReserveIdentifier, + who: &T::AccountId, + value: Self::Balance + ) -> (Self::NegativeImbalance, Self::Balance) { + if value.is_zero() { return (NegativeImbalance::zero(), Zero::zero()) } + + Reserves::::mutate(who, |reserves| -> (Self::NegativeImbalance, Self::Balance) { + match reserves.binary_search_by_key(id, |data| data.id) { + Ok(index) => { + let to_change = cmp::min(reserves[index].amount, value); + + let (imb, remain) = >::slash_reserved(who, to_change); + + // remain should always be zero but just to be defensive here + let actual = to_change.saturating_sub(remain); + + // `actual <= to_change` and `to_change <= amount`; qed; + reserves[index].amount -= actual; + + (imb, value - actual) + }, + Err(_) => { + (NegativeImbalance::zero(), value) + }, + } + }) + } + + /// Move the reserved balance of one account into the balance of another, according to `status`. + /// If `status` is `Reserved`, the balance will be reserved with given `id`. + /// + /// Is a no-op if: + /// - the value to be moved is zero; or + /// - the `slashed` id equal to `beneficiary` and the `status` is `Reserved`. 
+ fn repatriate_reserved_named( + id: &Self::ReserveIdentifier, + slashed: &T::AccountId, + beneficiary: &T::AccountId, + value: Self::Balance, + status: Status, + ) -> Result { + if value.is_zero() { return Ok(Zero::zero()) } + + if slashed == beneficiary { + return match status { + Status::Free => Ok(Self::unreserve_named(id, slashed, value)), + Status::Reserved => Ok(value.saturating_sub(Self::reserved_balance_named(id, slashed))), + }; + } + + Reserves::::try_mutate(slashed, |reserves| -> Result { + match reserves.binary_search_by_key(id, |data| data.id) { + Ok(index) => { + let to_change = cmp::min(reserves[index].amount, value); + + let actual = if status == Status::Reserved { + // make it the reserved under same identifier + Reserves::::try_mutate(beneficiary, |reserves| -> Result { + match reserves.binary_search_by_key(id, |data| data.id) { + Ok(index) => { + let remain = >::repatriate_reserved(slashed, beneficiary, to_change, status)?; + + // remain should always be zero but just to be defensive here + let actual = to_change.saturating_sub(remain); + + // this add can't overflow but just to be defensive. + reserves[index].amount = reserves[index].amount.saturating_add(actual); + + Ok(actual) + }, + Err(index) => { + let remain = >::repatriate_reserved(slashed, beneficiary, to_change, status)?; + + // remain should always be zero but just to be defensive here + let actual = to_change.saturating_sub(remain); + + reserves.try_insert(index, ReserveData { + id: id.clone(), + amount: actual + }).map_err(|_| Error::::TooManyReserves)?; + + Ok(actual) + }, + } + })? 
+ } else { + let remain = >::repatriate_reserved(slashed, beneficiary, to_change, status)?; + + // remain should always be zero but just to be defensive here + to_change.saturating_sub(remain) + }; + + // `actual <= to_change` and `to_change <= amount`; qed; + reserves[index].amount -= actual; + + Ok(value - actual) + }, + Err(_) => { + Ok(value) + }, + } + }) + } +} + impl, I: 'static> LockableCurrency for Pallet where T::Balance: MaybeSerializeDeserialize + Debug diff --git a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs index 38a49df37bdff..9589fb25805b4 100644 --- a/frame/balances/src/tests.rs +++ b/frame/balances/src/tests.rs @@ -964,5 +964,123 @@ macro_rules! decl_tests { assert_eq!(Balances::total_balance(&2), 100); }); } + + #[test] + fn named_reserve_should_work() { + <$ext_builder>::default().build().execute_with(|| { + let _ = Balances::deposit_creating(&1, 111); + + let id_1 = [1u8; 8]; + let id_2 = [2u8; 8]; + let id_3 = [3u8; 8]; + + // reserve + + assert_noop!(Balances::reserve_named(&id_1, &1, 112), Error::::InsufficientBalance); + + assert_ok!(Balances::reserve_named(&id_1, &1, 12)); + + assert_eq!(Balances::reserved_balance(1), 12); + assert_eq!(Balances::reserved_balance_named(&id_1, &1), 12); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 0); + + assert_ok!(Balances::reserve_named(&id_1, &1, 2)); + + assert_eq!(Balances::reserved_balance(1), 14); + assert_eq!(Balances::reserved_balance_named(&id_1, &1), 14); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 0); + + assert_ok!(Balances::reserve_named(&id_2, &1, 23)); + + assert_eq!(Balances::reserved_balance(1), 37); + assert_eq!(Balances::reserved_balance_named(&id_1, &1), 14); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 23); + + assert_ok!(Balances::reserve(&1, 34)); + + assert_eq!(Balances::reserved_balance(1), 71); + assert_eq!(Balances::reserved_balance_named(&id_1, &1), 14); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 23); + + 
assert_eq!(Balances::total_balance(&1), 111); + assert_eq!(Balances::free_balance(1), 40); + + assert_noop!(Balances::reserve_named(&id_3, &1, 2), Error::::TooManyReserves); + + // unreserve + + assert_eq!(Balances::unreserve_named(&id_1, &1, 10), 0); + + assert_eq!(Balances::reserved_balance(1), 61); + assert_eq!(Balances::reserved_balance_named(&id_1, &1), 4); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 23); + + assert_eq!(Balances::unreserve_named(&id_1, &1, 5), 1); + + assert_eq!(Balances::reserved_balance(1), 57); + assert_eq!(Balances::reserved_balance_named(&id_1, &1), 0); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 23); + + assert_eq!(Balances::unreserve_named(&id_2, &1, 3), 0); + + assert_eq!(Balances::reserved_balance(1), 54); + assert_eq!(Balances::reserved_balance_named(&id_1, &1), 0); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 20); + + assert_eq!(Balances::total_balance(&1), 111); + assert_eq!(Balances::free_balance(1), 57); + + // slash_reserved_named + + assert_ok!(Balances::reserve_named(&id_1, &1, 10)); + + assert_eq!(Balances::slash_reserved_named(&id_1, &1, 25).1, 15); + + assert_eq!(Balances::reserved_balance(1), 54); + assert_eq!(Balances::reserved_balance_named(&id_1, &1), 0); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 20); + assert_eq!(Balances::total_balance(&1), 101); + + assert_eq!(Balances::slash_reserved_named(&id_2, &1, 5).1, 0); + + assert_eq!(Balances::reserved_balance(1), 49); + assert_eq!(Balances::reserved_balance_named(&id_1, &1), 0); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 15); + assert_eq!(Balances::total_balance(&1), 96); + + // repatriate_reserved_named + + let _ = Balances::deposit_creating(&2, 100); + + assert_eq!(Balances::repatriate_reserved_named(&id_2, &1, &2, 10, Status::Reserved).unwrap(), 0); + + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 5); + assert_eq!(Balances::reserved_balance_named(&id_2, &2), 10); + 
assert_eq!(Balances::reserved_balance(&2), 10); + + assert_eq!(Balances::repatriate_reserved_named(&id_2, &2, &1, 11, Status::Reserved).unwrap(), 1); + + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 15); + assert_eq!(Balances::reserved_balance_named(&id_2, &2), 0); + assert_eq!(Balances::reserved_balance(&2), 0); + + assert_eq!(Balances::repatriate_reserved_named(&id_2, &1, &2, 10, Status::Free).unwrap(), 0); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 5); + assert_eq!(Balances::reserved_balance_named(&id_2, &2), 0); + assert_eq!(Balances::free_balance(&2), 110); + + // repatriate_reserved_named to self + + assert_eq!(Balances::repatriate_reserved_named(&id_2, &1, &1, 10, Status::Reserved).unwrap(), 5); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 5); + + assert_eq!(Balances::free_balance(&1), 47); + + assert_eq!(Balances::repatriate_reserved_named(&id_2, &1, &1, 15, Status::Free).unwrap(), 10); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 0); + + assert_eq!(Balances::free_balance(&1), 52); + }); + } } } diff --git a/frame/balances/src/tests_composite.rs b/frame/balances/src/tests_composite.rs index b4bdb13fbb838..ff10607bcee09 100644 --- a/frame/balances/src/tests_composite.rs +++ b/frame/balances/src/tests_composite.rs @@ -87,6 +87,10 @@ impl pallet_transaction_payment::Config for Test { type FeeMultiplierUpdate = (); } +parameter_types! 
{ + pub const MaxReserves: u32 = 2; +} + impl Config for Test { type Balance = u64; type DustRemoval = (); @@ -94,6 +98,8 @@ impl Config for Test { type ExistentialDeposit = ExistentialDeposit; type AccountStore = frame_system::Pallet; type MaxLocks = (); + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; type WeightInfo = (); } diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs index ac5adfd8d1f3d..afa68764573e0 100644 --- a/frame/balances/src/tests_local.rs +++ b/frame/balances/src/tests_local.rs @@ -90,6 +90,7 @@ impl pallet_transaction_payment::Config for Test { } parameter_types! { pub const MaxLocks: u32 = 50; + pub const MaxReserves: u32 = 2; } impl Config for Test { type Balance = u64; @@ -103,6 +104,8 @@ impl Config for Test { super::AccountData, >; type MaxLocks = MaxLocks; + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; type WeightInfo = (); } diff --git a/frame/balances/src/tests_reentrancy.rs b/frame/balances/src/tests_reentrancy.rs index 91ad51446c196..a12da8f001d80 100644 --- a/frame/balances/src/tests_reentrancy.rs +++ b/frame/balances/src/tests_reentrancy.rs @@ -106,6 +106,7 @@ impl OnUnbalanced> for OnDustRemoval { } parameter_types! { pub const MaxLocks: u32 = 50; + pub const MaxReserves: u32 = 2; } impl Config for Test { type Balance = u64; @@ -119,6 +120,8 @@ impl Config for Test { super::AccountData, >; type MaxLocks = MaxLocks; + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; type WeightInfo = (); } diff --git a/frame/bounties/src/tests.rs b/frame/bounties/src/tests.rs index e90b1f565a4c9..04cc06ef64b8d 100644 --- a/frame/bounties/src/tests.rs +++ b/frame/bounties/src/tests.rs @@ -88,6 +88,8 @@ parameter_types! 
{ } impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u64; type Event = Event; type DustRemoval = (); diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 6fdaecebd85f0..75ea8d9bd89b6 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -237,6 +237,8 @@ impl frame_system::Config for Test { } impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u64; type Event = Event; type DustRemoval = (); diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs index e8877e2774c78..1c68715d49e3e 100644 --- a/frame/democracy/src/tests.rs +++ b/frame/democracy/src/tests.rs @@ -123,6 +123,8 @@ parameter_types! { pub const MaxLocks: u32 = 10; } impl pallet_balances::Config for Test { + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type MaxLocks = MaxLocks; type Balance = u64; type Event = Event; diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index 2fb7927d98f91..830df099b5d08 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -237,6 +237,8 @@ impl pallet_balances::Config for Runtime { type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type WeightInfo = (); } diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index 9efe8c826091a..ab2edfaac6c29 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -1159,6 +1159,8 @@ mod tests { type ExistentialDeposit = ExistentialDeposit; type AccountStore = frame_system::Pallet; type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type WeightInfo = (); } diff --git 
a/frame/elections/src/mock.rs b/frame/elections/src/mock.rs index 896fd40020e41..b5dd15ce8119b 100644 --- a/frame/elections/src/mock.rs +++ b/frame/elections/src/mock.rs @@ -66,6 +66,8 @@ parameter_types! { } impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u64; type DustRemoval = (); type Event = Event; diff --git a/frame/example/src/tests.rs b/frame/example/src/tests.rs index 496cd5701fe58..f4658c2807647 100644 --- a/frame/example/src/tests.rs +++ b/frame/example/src/tests.rs @@ -83,6 +83,8 @@ parameter_types! { } impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u64; type DustRemoval = (); type Event = Event; diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index c630fb639960b..593b8db92c60d 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -721,6 +721,8 @@ mod tests { type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type WeightInfo = (); } diff --git a/frame/gilt/src/mock.rs b/frame/gilt/src/mock.rs index f5c0d3a5aabef..fb888515496b1 100644 --- a/frame/gilt/src/mock.rs +++ b/frame/gilt/src/mock.rs @@ -85,6 +85,8 @@ impl pallet_balances::Config for Test { type AccountStore = System; type WeightInfo = (); type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; } parameter_types! { diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index 1ab28f7752ef0..df55f6037e303 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -150,6 +150,8 @@ parameter_types! 
{ impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u128; type DustRemoval = (); type Event = Event; diff --git a/frame/identity/src/tests.rs b/frame/identity/src/tests.rs index 2bfad79640c2f..262b3211b6d1b 100644 --- a/frame/identity/src/tests.rs +++ b/frame/identity/src/tests.rs @@ -83,6 +83,8 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type WeightInfo = (); } parameter_types! { diff --git a/frame/indices/src/mock.rs b/frame/indices/src/mock.rs index efaaa0212467b..bd9e9c33af25e 100644 --- a/frame/indices/src/mock.rs +++ b/frame/indices/src/mock.rs @@ -77,6 +77,8 @@ parameter_types! { impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u64; type DustRemoval = (); type Event = Event; diff --git a/frame/lottery/src/mock.rs b/frame/lottery/src/mock.rs index b668fba85951b..07593c17e5086 100644 --- a/frame/lottery/src/mock.rs +++ b/frame/lottery/src/mock.rs @@ -87,6 +87,8 @@ parameter_types! { impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u64; type Event = Event; type DustRemoval = (); diff --git a/frame/multisig/src/tests.rs b/frame/multisig/src/tests.rs index cf457f6db6022..69f7cb17b0f5a 100644 --- a/frame/multisig/src/tests.rs +++ b/frame/multisig/src/tests.rs @@ -78,6 +78,8 @@ parameter_types! 
{ } impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u64; type Event = Event; type DustRemoval = (); diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs index 4372fd326cc94..a76d4506f93bc 100644 --- a/frame/nicks/src/lib.rs +++ b/frame/nicks/src/lib.rs @@ -304,6 +304,8 @@ mod tests { } impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u64; type Event = Event; type DustRemoval = (); diff --git a/frame/offences/benchmarking/src/mock.rs b/frame/offences/benchmarking/src/mock.rs index 9047120923ad6..7230c1215afc9 100644 --- a/frame/offences/benchmarking/src/mock.rs +++ b/frame/offences/benchmarking/src/mock.rs @@ -72,6 +72,8 @@ parameter_types! { } impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = Balance; type Event = Event; type DustRemoval = (); diff --git a/frame/proxy/src/tests.rs b/frame/proxy/src/tests.rs index fd632b91bb351..a2cb00d0ccc3d 100644 --- a/frame/proxy/src/tests.rs +++ b/frame/proxy/src/tests.rs @@ -80,6 +80,8 @@ parameter_types! { } impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u64; type Event = Event; type DustRemoval = (); diff --git a/frame/recovery/src/mock.rs b/frame/recovery/src/mock.rs index 72dbc29fd7160..6a0abab2bd12b 100644 --- a/frame/recovery/src/mock.rs +++ b/frame/recovery/src/mock.rs @@ -79,6 +79,8 @@ parameter_types! 
{ impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u128; type DustRemoval = (); type Event = Event; diff --git a/frame/scored-pool/src/mock.rs b/frame/scored-pool/src/mock.rs index 8f7acd32007e7..44a28234a2a82 100644 --- a/frame/scored-pool/src/mock.rs +++ b/frame/scored-pool/src/mock.rs @@ -84,6 +84,8 @@ impl frame_system::Config for Test { impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u64; type Event = Event; type DustRemoval = (); diff --git a/frame/session/benchmarking/src/mock.rs b/frame/session/benchmarking/src/mock.rs index cf2fa8a07cfe0..87d1242812db2 100644 --- a/frame/session/benchmarking/src/mock.rs +++ b/frame/session/benchmarking/src/mock.rs @@ -74,6 +74,8 @@ parameter_types! { } impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = Balance; type Event = Event; type DustRemoval = (); diff --git a/frame/society/src/mock.rs b/frame/society/src/mock.rs index aa46d40a14ae9..5e156caa282eb 100644 --- a/frame/society/src/mock.rs +++ b/frame/society/src/mock.rs @@ -96,6 +96,8 @@ impl frame_system::Config for Test { impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u64; type Event = Event; type DustRemoval = (); diff --git a/frame/staking/fuzzer/src/mock.rs b/frame/staking/fuzzer/src/mock.rs index 11d810a26e175..4ac1a10364e6c 100644 --- a/frame/staking/fuzzer/src/mock.rs +++ b/frame/staking/fuzzer/src/mock.rs @@ -71,6 +71,8 @@ parameter_types! 
{ } impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = Balance; type Event = Event; type DustRemoval = (); diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index 4027ac1f670bc..8930a6bfd61c8 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -155,6 +155,8 @@ impl frame_system::Config for Test { } impl pallet_balances::Config for Test { type MaxLocks = MaxLocks; + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = Balance; type Event = Event; type DustRemoval = (); diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index 52def92ef9b47..96e1cece55065 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -23,7 +23,8 @@ pub mod tokens; pub use tokens::fungible; pub use tokens::fungibles; pub use tokens::currency::{ - Currency, LockIdentifier, LockableCurrency, ReservableCurrency, VestingSchedule, + Currency, LockIdentifier, LockableCurrency, ReservableCurrency, NamedReservableCurrency, + VestingSchedule, }; pub use tokens::imbalance::{Imbalance, OnUnbalanced, SignedImbalance}; pub use tokens::{ExistenceRequirement, WithdrawReasons, BalanceStatus}; diff --git a/frame/support/src/traits/tokens/currency.rs b/frame/support/src/traits/tokens/currency.rs index a18e0b6593bc0..a00e99b0c4ac3 100644 --- a/frame/support/src/traits/tokens/currency.rs +++ b/frame/support/src/traits/tokens/currency.rs @@ -25,7 +25,7 @@ use super::imbalance::{Imbalance, SignedImbalance}; use frame_support::traits::MaxEncodedLen; mod reservable; -pub use reservable::ReservableCurrency; +pub use reservable::{ReservableCurrency, NamedReservableCurrency}; mod lockable; pub use lockable::{LockableCurrency, VestingSchedule, LockIdentifier}; diff --git a/frame/support/src/traits/tokens/currency/reservable.rs b/frame/support/src/traits/tokens/currency/reservable.rs index 14ea1d3a16fb6..17dee7a8ae65a 100644 --- 
a/frame/support/src/traits/tokens/currency/reservable.rs +++ b/frame/support/src/traits/tokens/currency/reservable.rs @@ -81,3 +81,116 @@ pub trait ReservableCurrency: Currency { status: BalanceStatus, ) -> Result; } + +pub trait NamedReservableCurrency: ReservableCurrency { + /// An identifier for a reserve. Used for disambiguating different reserves so that + /// they can be individually replaced or removed. + type ReserveIdentifier; + + /// Deducts up to `value` from reserved balance of `who`. This function cannot fail. + /// + /// As much funds up to `value` will be deducted as possible. If the reserve balance of `who` + /// is less than `value`, then a non-zero second item will be returned. + fn slash_reserved_named( + id: &Self::ReserveIdentifier, + who: &AccountId, + value: Self::Balance + ) -> (Self::NegativeImbalance, Self::Balance); + + /// The amount of the balance of a given account that is externally reserved; this can still get + /// slashed, but gets slashed last of all. + /// + /// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens + /// that are still 'owned' by the account holder, but which are suspendable. + /// + /// When this balance falls below the value of `ExistentialDeposit`, then this 'reserve account' + /// is deleted: specifically, `ReservedBalance`. + /// + /// `system::AccountNonce` is also deleted if `FreeBalance` is also zero (it also gets + /// collapsed to zero if it ever becomes less than `ExistentialDeposit`. + fn reserved_balance_named(id: &Self::ReserveIdentifier, who: &AccountId) -> Self::Balance; + + /// Moves `value` from balance to reserved balance. + /// + /// If the free balance is lower than `value`, then no funds will be moved and an `Err` will + /// be returned to notify of this. This is different behavior than `unreserve`. 
+ fn reserve_named(id: &Self::ReserveIdentifier, who: &AccountId, value: Self::Balance) -> DispatchResult; + + /// Moves up to `value` from reserved balance to free balance. This function cannot fail. + /// + /// As much funds up to `value` will be moved as possible. If the reserve balance of `who` + /// is less than `value`, then the remaining amount will be returned. + /// + /// # NOTES + /// + /// - This is different from `reserve`. + /// - If the remaining reserved balance is less than `ExistentialDeposit`, it will + /// invoke `on_reserved_too_low` and could reap the account. + fn unreserve_named(id: &Self::ReserveIdentifier, who: &AccountId, value: Self::Balance) -> Self::Balance; + + /// Moves up to `value` from reserved balance of account `slashed` to balance of account + /// `beneficiary`. `beneficiary` must exist for this to succeed. If it does not, `Err` will be + /// returned. Funds will be placed in either the `free` balance or the `reserved` balance, + /// depending on the `status`. + /// + /// As much funds up to `value` will be deducted as possible. If this is less than `value`, + /// then `Ok(non_zero)` will be returned. + fn repatriate_reserved_named( + id: &Self::ReserveIdentifier, + slashed: &AccountId, + beneficiary: &AccountId, + value: Self::Balance, + status: BalanceStatus, + ) -> Result; + + /// Ensure the reserved balance is equal to `value`. + /// + /// This will reserve extra amount of current reserved balance is less than `value`. + /// And unreserve if current reserved balance is greater than `value`. 
+ fn ensure_reserved_named(id: &Self::ReserveIdentifier, who: &AccountId, value: Self::Balance) -> DispatchResult { + let current = Self::reserved_balance_named(id, who); + if current > value { + // we always have enough balance to unreserve here + Self::unreserve_named(id, who, current - value); + Ok(()) + } else if value > current { + // we checked value > current + Self::reserve_named(id, who, value - current) + } else { // current == value + Ok(()) + } + } + + /// Unreserve all the named reserved balances, returning unreserved amount. + /// + /// Is a no-op if the value to be unreserved is zero. + fn unreserve_all_named(id: &Self::ReserveIdentifier, who: &AccountId) -> Self::Balance { + let value = Self::reserved_balance_named(id, who); + Self::slash_reserved_named(id, who, value); + value + } + + /// Slash all the reserved balance, returning the negative imbalance created. + /// + /// Is a no-op if the value to be slashed is zero. + fn slash_all_reserved_named(id: &Self::ReserveIdentifier, who: &AccountId) -> Self::NegativeImbalance { + let value = Self::reserved_balance_named(id, who); + Self::slash_reserved_named(id, who, value).0 + } + + /// Move all the named reserved balance of one account into the balance of another, according to `status`. + /// If `status` is `Reserved`, the balance will be reserved with given `id`. + /// + /// Is a no-op if: + /// - the value to be moved is zero; or + /// - the `slashed` id equal to `beneficiary` and the `status` is `Reserved`. 
+ fn repatriate_all_reserved_named( + id: &Self::ReserveIdentifier, + slashed: &AccountId, + beneficiary: &AccountId, + status: BalanceStatus, + ) -> DispatchResult { + let value = Self::reserved_balance_named(id, slashed); + Self::repatriate_reserved_named(id, slashed, beneficiary, value, status).map(|_| ()) + } +} diff --git a/frame/tips/src/tests.rs b/frame/tips/src/tests.rs index 3b11e105c6d06..6b144273ca828 100644 --- a/frame/tips/src/tests.rs +++ b/frame/tips/src/tests.rs @@ -87,6 +87,8 @@ parameter_types! { } impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u64; type Event = Event; type DustRemoval = (); diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 3cf79caef7700..2b1ad2db9ae09 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -702,6 +702,8 @@ mod tests { type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type WeightInfo = (); } diff --git a/frame/treasury/src/tests.rs b/frame/treasury/src/tests.rs index cb6d4903a5732..408f99f29e1b1 100644 --- a/frame/treasury/src/tests.rs +++ b/frame/treasury/src/tests.rs @@ -83,6 +83,8 @@ parameter_types! { } impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u64; type Event = Event; type DustRemoval = (); diff --git a/frame/uniques/src/mock.rs b/frame/uniques/src/mock.rs index 1040821d0d886..336a262358b24 100644 --- a/frame/uniques/src/mock.rs +++ b/frame/uniques/src/mock.rs @@ -70,6 +70,7 @@ impl frame_system::Config for Test { parameter_types! 
{ pub const ExistentialDeposit: u64 = 1; + pub const MaxReserves: u32 = 50; } impl pallet_balances::Config for Test { @@ -80,6 +81,8 @@ impl pallet_balances::Config for Test { type AccountStore = System; type WeightInfo = (); type MaxLocks = (); + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; } parameter_types! { diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index 02b878e799eea..aa6bea8a27d36 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -118,6 +118,8 @@ parameter_types! { } impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u64; type DustRemoval = (); type Event = Event; diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index c8156e08c69cb..8c520b715801e 100644 --- a/frame/vesting/src/lib.rs +++ b/frame/vesting/src/lib.rs @@ -473,6 +473,8 @@ mod tests { type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type MaxLocks = MaxLocks; + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type WeightInfo = (); } parameter_types! { From e98aca335f066d84d7a5cbabf280392f39e1cc99 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20P=C3=A1nik?= Date: Fri, 4 Jun 2021 11:01:05 +0200 Subject: [PATCH 04/61] update ss58 type to u16 (#8955) --- bin/node/runtime/src/lib.rs | 2 +- frame/system/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 6c38bf41ec591..14bf16d19778e 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -186,7 +186,7 @@ parameter_types! 
{ }) .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) .build_or_panic(); - pub const SS58Prefix: u8 = 42; + pub const SS58Prefix: u16 = 42; } const_assert!(NORMAL_DISPATCH_RATIO.deconstruct() >= AVERAGE_ON_INITIALIZE_RATIO.deconstruct()); diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 6938df7e86c23..f0597ea2fe0f3 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -266,7 +266,7 @@ pub mod pallet { /// that the runtime should know about the prefix in order to make use of it as /// an identifier of the chain. #[pallet::constant] - type SS58Prefix: Get; + type SS58Prefix: Get; /// What to do if the user wants the code set to something. Just use `()` unless you are in /// cumulus. From d27dea95712696fcc5dd1fcc93f22926e0b9e57f Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Fri, 4 Jun 2021 13:27:05 +0200 Subject: [PATCH 05/61] Fixed build (#9021) --- frame/transaction-storage/src/mock.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/frame/transaction-storage/src/mock.rs b/frame/transaction-storage/src/mock.rs index 51eb61dd26b78..351893c08a33b 100644 --- a/frame/transaction-storage/src/mock.rs +++ b/frame/transaction-storage/src/mock.rs @@ -88,6 +88,8 @@ impl pallet_balances::Config for Test { type AccountStore = System; type WeightInfo = (); type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = (); } impl pallet_transaction_storage::Config for Test { From 2cff60c3be7b84d940b219399b1d2c8aa2e4b31d Mon Sep 17 00:00:00 2001 From: Andreas Doerr Date: Fri, 4 Jun 2021 16:46:16 +0200 Subject: [PATCH 06/61] Bump parity-db (#9024) --- Cargo.lock | 4 ++-- bin/node/bench/Cargo.toml | 2 +- client/db/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 97b64e07e4133..62056dd99b2e0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5691,9 +5691,9 @@ dependencies = [ [[package]] name = "parity-db" -version = "0.2.3" +version = "0.2.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "495197c078e54b8735181aa35c00a327f7f3a3cc00a1ee8c95926dd010f0ec6b" +checksum = "2e337f62db341435f0da05b8f6b97e984ef4ea5800510cd07c2d624688c40b47" dependencies = [ "blake2-rfc", "crc32fast", diff --git a/bin/node/bench/Cargo.toml b/bin/node/bench/Cargo.toml index 728eb8d6093ce..93ee35d98f98d 100644 --- a/bin/node/bench/Cargo.toml +++ b/bin/node/bench/Cargo.toml @@ -38,6 +38,6 @@ hex = "0.4.0" rand = { version = "0.7.2", features = ["small_rng"] } lazy_static = "1.4.0" parity-util-mem = { version = "0.9.0", default-features = false, features = ["primitive-types"] } -parity-db = { version = "0.2.2" } +parity-db = { version = "0.2.4" } sc-transaction-pool = { version = "3.0.0", path = "../../../client/transaction-pool" } futures = { version = "0.3.4", features = ["thread-pool"] } diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index e5e52494c2db6..43bae63f09c2b 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -35,7 +35,7 @@ sp-trie = { version = "3.0.0", path = "../../primitives/trie" } sp-consensus = { version = "0.9.0", path = "../../primitives/consensus/common" } sp-blockchain = { version = "3.0.0", path = "../../primitives/blockchain" } sp-database = { version = "3.0.0", path = "../../primitives/database" } -parity-db = { version = "0.2.3", optional = true } +parity-db = { version = "0.2.4", optional = true } prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.9.0", path = "../../utils/prometheus" } [dev-dependencies] From 37bb3ae7eb559afaf9c7dbf7fd99e08b282c8127 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Fri, 4 Jun 2021 22:31:06 +0100 Subject: [PATCH 07/61] consensus: handle justification sync for blocks authored locally (#8698) * consensus: add trait to control justification sync process * network: implement JustificationSyncLink for NetworkService * slots: handle 
justification sync in slot worker * babe: fix slot worker instantiation * aura: fix slot worker instantiation * pow: handle justification sync in miner * babe: fix tests * aura: fix tests * node: fix compilation * node-template: fix compilation * consensus: rename justification sync link parameter * aura: fix test compilation * consensus: slots: move JustificationSyncLink out of on_slot --- bin/node-template/node/src/service.rs | 3 +- bin/node/cli/src/service.rs | 1 + client/consensus/aura/src/lib.rs | 62 ++++++++++++------- client/consensus/babe/src/lib.rs | 53 +++++++++++----- client/consensus/babe/src/tests.rs | 5 +- client/consensus/pow/src/lib.rs | 11 ++-- client/consensus/pow/src/worker.rs | 19 ++++-- client/consensus/slots/src/lib.rs | 55 ++++++++++------ client/network/src/protocol.rs | 5 ++ client/network/src/protocol/sync.rs | 7 ++- client/network/src/service.rs | 20 ++++++ .../consensus/common/src/block_import.rs | 53 ++++++++++++++++ primitives/consensus/common/src/lib.rs | 4 +- 13 files changed, 228 insertions(+), 70 deletions(-) diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index f504904100765..8ed9c1ee50378 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -220,7 +220,7 @@ pub fn new_full(mut config: Configuration) -> Result let slot_duration = sc_consensus_aura::slot_duration(&*client)?; let raw_slot_duration = slot_duration.slot_duration(); - let aura = sc_consensus_aura::start_aura::( + let aura = sc_consensus_aura::start_aura::( StartAuraParams { slot_duration, client: client.clone(), @@ -243,6 +243,7 @@ pub fn new_full(mut config: Configuration) -> Result keystore: keystore_container.sync_keystore(), can_author_with, sync_oracle: network.clone(), + justification_sync_link: network.clone(), block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32), telemetry: telemetry.as_ref().map(|x| x.handle()), }, diff --git a/bin/node/cli/src/service.rs 
b/bin/node/cli/src/service.rs index 42020e6668e42..a9ac2ac8065f9 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -308,6 +308,7 @@ pub fn new_full_base( env: proposer, block_import, sync_oracle: network.clone(), + justification_sync_link: network.clone(), create_inherent_data_providers: move |parent, ()| { let client_clone = client_clone.clone(); async move { diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 623096cd5c640..702e4dc0bf1bd 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -109,7 +109,7 @@ fn slot_author(slot: Slot, authorities: &[AuthorityId

]) -> Option<&A } /// Parameters of [`start_aura`]. -pub struct StartAuraParams { +pub struct StartAuraParams { /// The duration of a slot. pub slot_duration: SlotDuration, /// The client to interact with the chain. @@ -122,8 +122,10 @@ pub struct StartAuraParams { pub proposer_factory: PF, /// The sync oracle that can give us the current sync status. pub sync_oracle: SO, + /// Hook into the sync module to control the justification sync process. + pub justification_sync_link: L, /// Something that can create the inherent data providers. - pub create_inherent_data_providers: IDP, + pub create_inherent_data_providers: CIDP, /// Should we force the authoring of blocks? pub force_authoring: bool, /// The backoff strategy when we miss slots. @@ -143,7 +145,7 @@ pub struct StartAuraParams { } /// Start the aura worker. The returned future should be run in a futures executor. -pub fn start_aura( +pub fn start_aura( StartAuraParams { slot_duration, client, @@ -151,6 +153,7 @@ pub fn start_aura( block_import, proposer_factory, sync_oracle, + justification_sync_link, create_inherent_data_providers, force_authoring, backoff_authoring_blocks, @@ -158,31 +161,33 @@ pub fn start_aura( can_author_with, block_proposal_slot_portion, telemetry, - }: StartAuraParams, + }: StartAuraParams, ) -> Result, sp_consensus::Error> where + P: Pair + Send + Sync, + P::Public: AppPublic + Hash + Member + Encode + Decode, + P::Signature: TryFrom> + Hash + Member + Encode + Decode, B: BlockT, C: ProvideRuntimeApi + BlockOf + ProvideCache + AuxStore + HeaderBackend + Send + Sync, C::Api: AuraApi>, SC: SelectChain, + I: BlockImport> + Send + Sync + 'static, PF: Environment + Send + Sync + 'static, PF::Proposer: Proposer>, - P: Pair + Send + Sync, - P::Public: AppPublic + Hash + Member + Encode + Decode, - P::Signature: TryFrom> + Hash + Member + Encode + Decode, - I: BlockImport> + Send + Sync + 'static, - Error: std::error::Error + Send + From + 'static, SO: SyncOracle + Send + Sync + Clone, - CAW: 
CanAuthorWith + Send, + L: sp_consensus::JustificationSyncLink, + CIDP: CreateInherentDataProviders + Send, + CIDP::InherentDataProviders: InherentDataProviderExt + Send, BS: BackoffAuthoringBlocksStrategy> + Send + 'static, - IDP: CreateInherentDataProviders + Send, - IDP::InherentDataProviders: InherentDataProviderExt + Send, + CAW: CanAuthorWith + Send, + Error: std::error::Error + Send + From + 'static, { - let worker = build_aura_worker::(BuildAuraWorkerParams { + let worker = build_aura_worker::(BuildAuraWorkerParams { client: client.clone(), block_import, proposer_factory, keystore, sync_oracle: sync_oracle.clone(), + justification_sync_link, force_authoring, backoff_authoring_blocks, telemetry, @@ -200,7 +205,7 @@ pub fn start_aura( } /// Parameters of [`build_aura_worker`]. -pub struct BuildAuraWorkerParams { +pub struct BuildAuraWorkerParams { /// The client to interact with the chain. pub client: Arc, /// The block import. @@ -209,6 +214,8 @@ pub struct BuildAuraWorkerParams { pub proposer_factory: PF, /// The sync oracle that can give us the current sync status. pub sync_oracle: SO, + /// Hook into the sync module to control the justification sync process. + pub justification_sync_link: L, /// Should we force the authoring of blocks? pub force_authoring: bool, /// The backoff strategy when we miss slots. @@ -228,18 +235,19 @@ pub struct BuildAuraWorkerParams { /// Build the aura worker. /// /// The caller is responsible for running this worker, otherwise it will do nothing. 
-pub fn build_aura_worker( +pub fn build_aura_worker( BuildAuraWorkerParams { client, block_import, proposer_factory, sync_oracle, + justification_sync_link, backoff_authoring_blocks, keystore, block_proposal_slot_portion, telemetry, force_authoring, - }: BuildAuraWorkerParams, + }: BuildAuraWorkerParams, ) -> impl sc_consensus_slots::SlotWorker>::Proof> where B: BlockT, C: ProvideRuntimeApi + BlockOf + ProvideCache + AuxStore + HeaderBackend + Send + Sync, @@ -252,6 +260,7 @@ pub fn build_aura_worker( I: BlockImport> + Send + Sync + 'static, Error: std::error::Error + Send + From + 'static, SO: SyncOracle + Send + Sync + Clone, + L: sp_consensus::JustificationSyncLink, BS: BackoffAuthoringBlocksStrategy> + Send + 'static, { AuraWorker { @@ -260,6 +269,7 @@ pub fn build_aura_worker( env: proposer_factory, keystore, sync_oracle, + justification_sync_link, force_authoring, backoff_authoring_blocks, telemetry, @@ -268,12 +278,13 @@ pub fn build_aura_worker( } } -struct AuraWorker { +struct AuraWorker { client: Arc, block_import: I, env: E, keystore: SyncCryptoStorePtr, sync_oracle: SO, + justification_sync_link: L, force_authoring: bool, backoff_authoring_blocks: Option, block_proposal_slot_portion: SlotProportion, @@ -281,8 +292,8 @@ struct AuraWorker { _key_type: PhantomData

, } -impl sc_consensus_slots::SimpleSlotWorker - for AuraWorker +impl sc_consensus_slots::SimpleSlotWorker + for AuraWorker where B: BlockT, C: ProvideRuntimeApi + BlockOf + ProvideCache + HeaderBackend + Sync, @@ -294,11 +305,13 @@ where P::Public: AppPublic + Public + Member + Encode + Decode + Hash, P::Signature: TryFrom> + Member + Encode + Decode + Hash + Debug, SO: SyncOracle + Send + Clone, + L: sp_consensus::JustificationSyncLink, BS: BackoffAuthoringBlocksStrategy> + Send + 'static, Error: std::error::Error + Send + From + 'static, { type BlockImport = I; type SyncOracle = SO; + type JustificationSyncLink = L; type CreateProposer = Pin> + Send + 'static >>; @@ -425,6 +438,10 @@ where &mut self.sync_oracle } + fn justification_sync_link(&mut self) -> &mut Self::JustificationSyncLink { + &mut self.justification_sync_link + } + fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { Box::pin(self.env.init(block).map_err(|e| { sp_consensus::Error::ClientImport(format!("{:?}", e)).into() @@ -725,13 +742,14 @@ mod tests { let slot_duration = slot_duration(&*client).expect("slot duration available"); - aura_futures.push(start_aura::(StartAuraParams { + aura_futures.push(start_aura::(StartAuraParams { slot_duration, block_import: client.clone(), select_chain, client, proposer_factory: environ, sync_oracle: DummyOracle, + justification_sync_link: (), create_inherent_data_providers: |_, _| async { let timestamp = TimestampInherentDataProvider::from_system_time(); let slot = InherentDataProvider::from_timestamp_and_duration( @@ -804,6 +822,7 @@ mod tests { env: environ, keystore: keystore.into(), sync_oracle: DummyOracle.clone(), + justification_sync_link: (), force_authoring: false, backoff_authoring_blocks: Some(BackoffAuthoringOnFinalizedHeadLagging::default()), telemetry: None, @@ -853,6 +872,7 @@ mod tests { env: environ, keystore: keystore.into(), sync_oracle: DummyOracle.clone(), + justification_sync_link: (), force_authoring: false, 
backoff_authoring_blocks: Option::<()>::None, telemetry: None, @@ -871,7 +891,7 @@ mod tests { duration: Duration::from_millis(1000), chain_head: head, block_size_limit: None, - }, + } )).unwrap(); // The returned block should be imported and we should be able to get its header by now. diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 0b02bbbe14106..409999ef1fdca 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -363,7 +363,7 @@ impl std::ops::Deref for Config { } /// Parameters for BABE. -pub struct BabeParams { +pub struct BabeParams { /// The keystore that manages the keys of the node. pub keystore: SyncCryptoStorePtr, @@ -384,8 +384,11 @@ pub struct BabeParams { /// A sync oracle pub sync_oracle: SO, + /// Hook into the sync module to control the justification sync process. + pub justification_sync_link: L, + /// Something that can create the inherent data providers. - pub create_inherent_data_providers: IDP, + pub create_inherent_data_providers: CIDP, /// Force authoring of blocks even if we are offline pub force_authoring: bool, @@ -411,13 +414,14 @@ pub struct BabeParams { } /// Start the babe worker. 
-pub fn start_babe(BabeParams { +pub fn start_babe(BabeParams { keystore, client, select_chain, env, block_import, sync_oracle, + justification_sync_link, create_inherent_data_providers, force_authoring, backoff_authoring_blocks, @@ -425,26 +429,35 @@ pub fn start_babe(BabeParams { can_author_with, block_proposal_slot_portion, telemetry, -}: BabeParams) -> Result< +}: BabeParams) -> Result< BabeWorker, sp_consensus::Error, > where B: BlockT, - C: ProvideRuntimeApi + ProvideCache + ProvideUncles + BlockchainEvents - + HeaderBackend + HeaderMetadata - + Send + Sync + 'static, + C: ProvideRuntimeApi + + ProvideCache + + ProvideUncles + + BlockchainEvents + + HeaderBackend + + HeaderMetadata + + Send + + Sync + + 'static, C::Api: BabeApi, SC: SelectChain + 'static, E: Environment + Send + Sync + 'static, E::Proposer: Proposer>, - I: BlockImport> + Send - + Sync + 'static, - Error: std::error::Error + Send + From + From + 'static, + I: BlockImport> + + Send + + Sync + + 'static, SO: SyncOracle + Send + Sync + Clone + 'static, - CAW: CanAuthorWith + Send + Sync + 'static, + L: sp_consensus::JustificationSyncLink + 'static, + CIDP: CreateInherentDataProviders + Send + Sync + 'static, + CIDP::InherentDataProviders: InherentDataProviderExt + Send, BS: BackoffAuthoringBlocksStrategy> + Send + 'static, - IDP: CreateInherentDataProviders + Send + Sync + 'static, - IDP::InherentDataProviders: InherentDataProviderExt + Send, + CAW: CanAuthorWith + Send + Sync + 'static, + Error: std::error::Error + Send + From + From + 'static, { const HANDLE_BUFFER_SIZE: usize = 1024; @@ -456,6 +469,7 @@ pub fn start_babe(BabeParams { block_import, env, sync_oracle: sync_oracle.clone(), + justification_sync_link, force_authoring, backoff_authoring_blocks, keystore, @@ -600,11 +614,12 @@ type SlotNotificationSinks = Arc< Mutex::Hash, NumberFor, Epoch>)>>> >; -struct BabeSlotWorker { +struct BabeSlotWorker { client: Arc, block_import: I, env: E, sync_oracle: SO, + justification_sync_link: L, 
force_authoring: bool, backoff_authoring_blocks: Option, keystore: SyncCryptoStorePtr, @@ -615,8 +630,8 @@ struct BabeSlotWorker { telemetry: Option, } -impl sc_consensus_slots::SimpleSlotWorker - for BabeSlotWorker +impl sc_consensus_slots::SimpleSlotWorker + for BabeSlotWorker where B: BlockT, C: ProvideRuntimeApi + @@ -628,12 +643,14 @@ where E::Proposer: Proposer>, I: BlockImport> + Send + Sync + 'static, SO: SyncOracle + Send + Clone, + L: sp_consensus::JustificationSyncLink, BS: BackoffAuthoringBlocksStrategy>, Error: std::error::Error + Send + From + From + 'static, { type EpochData = ViableEpochDescriptor, Epoch>; type Claim = (PreDigest, AuthorityId); type SyncOracle = SO; + type JustificationSyncLink = L; type CreateProposer = Pin> + Send + 'static >>; @@ -798,6 +815,10 @@ where &mut self.sync_oracle } + fn justification_sync_link(&mut self) -> &mut Self::JustificationSyncLink { + &mut self.justification_sync_link + } + fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { Box::pin(self.env.init(block).map_err(|e| { sp_consensus::Error::ClientImport(format!("{:?}", e)) diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index d042f25399ee4..467de9683c689 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -390,9 +390,7 @@ fn rejects_empty_block() { }) } -fn run_one_test( - mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static, -) { +fn run_one_test(mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static) { sp_tracing::try_init_simple(); let mutator = Arc::new(mutator) as Mutator; @@ -473,6 +471,7 @@ fn run_one_test( babe_link: data.link.clone(), keystore, can_author_with: sp_consensus::AlwaysCanAuthor, + justification_sync_link: (), block_proposal_slot_portion: SlotProportion::new(0.5), telemetry: None, }).expect("Starts babe")); diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index 17cdae48cdb67..6688c14b6375d 100644 
--- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -527,20 +527,21 @@ pub fn import_queue( /// /// `pre_runtime` is a parameter that allows a custom additional pre-runtime digest to be inserted /// for blocks being built. This can encode authorship information, or just be a graffiti. -pub fn start_mining_worker( +pub fn start_mining_worker( block_import: BoxBlockImport>, client: Arc, select_chain: S, algorithm: Algorithm, mut env: E, mut sync_oracle: SO, + justification_sync_link: L, pre_runtime: Option>, create_inherent_data_providers: CIDP, timeout: Duration, build_time: Duration, can_author_with: CAW, ) -> ( - Arc>::Proof>>>, + Arc>::Proof>>>, impl Future, ) where Block: BlockT, @@ -552,14 +553,16 @@ pub fn start_mining_worker( E::Error: std::fmt::Debug, E::Proposer: Proposer>, SO: SyncOracle + Clone + Send + Sync + 'static, - CAW: CanAuthorWith + Clone + Send + 'static, + L: sp_consensus::JustificationSyncLink, CIDP: CreateInherentDataProviders, + CAW: CanAuthorWith + Clone + Send + 'static, { let mut timer = UntilImportedOrTimeout::new(client.import_notification_stream(), timeout); - let worker = Arc::new(Mutex::new(MiningWorker:: { + let worker = Arc::new(Mutex::new(MiningWorker { build: None, algorithm: algorithm.clone(), block_import, + justification_sync_link, })); let worker_ret = worker.clone(); diff --git a/client/consensus/pow/src/worker.rs b/client/consensus/pow/src/worker.rs index 18844e51ce418..e5d76592b7fd1 100644 --- a/client/consensus/pow/src/worker.rs +++ b/client/consensus/pow/src/worker.rs @@ -18,8 +18,12 @@ use std::{pin::Pin, time::Duration, collections::HashMap, borrow::Cow}; use sc_client_api::ImportNotifications; -use sp_runtime::{DigestItem, traits::Block as BlockT, generic::BlockId}; use sp_consensus::{Proposal, BlockOrigin, BlockImportParams, import_queue::BoxBlockImport}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT}, + DigestItem, +}; use futures::{prelude::*, 
task::{Context, Poll}}; use futures_timer::Delay; use log::*; @@ -57,18 +61,22 @@ pub struct MiningWorker< Block: BlockT, Algorithm: PowAlgorithm, C: sp_api::ProvideRuntimeApi, - Proof + L: sp_consensus::JustificationSyncLink, + Proof, > { pub(crate) build: Option>, pub(crate) algorithm: Algorithm, pub(crate) block_import: BoxBlockImport>, + pub(crate) justification_sync_link: L, } -impl MiningWorker where +impl MiningWorker +where Block: BlockT, C: sp_api::ProvideRuntimeApi, Algorithm: PowAlgorithm, Algorithm::Difficulty: 'static + Send, + L: sp_consensus::JustificationSyncLink, sp_api::TransactionFor: Send + 'static, { /// Get the current best hash. `None` if the worker has just started or the client is doing @@ -139,8 +147,11 @@ impl MiningWorker where Box::new(intermediate) as Box<_>, ); + let header = import_block.post_header(); match self.block_import.import_block(import_block, HashMap::default()).await { - Ok(_) => { + Ok(res) => { + res.handle_justification(&header.hash(), *header.number(), &mut self.justification_sync_link); + info!( target: "pow", "✅ Successfully mined block on top of: {}", diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index cc879f769e47f..188aa52881a78 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -39,7 +39,9 @@ use futures_timer::Delay; use log::{debug, error, info, warn}; use sp_api::{ProvideRuntimeApi, ApiRef}; use sp_arithmetic::traits::BaseArithmetic; -use sp_consensus::{BlockImport, Proposer, SyncOracle, SelectChain, CanAuthorWith, SlotData}; +use sp_consensus::{ + BlockImport, CanAuthorWith, JustificationSyncLink, Proposer, SelectChain, SlotData, SyncOracle, +}; use sp_consensus_slots::Slot; use sp_inherents::CreateInherentDataProviders; use sp_runtime::{ @@ -92,6 +94,10 @@ pub trait SimpleSlotWorker { /// A handle to a `SyncOracle`. 
type SyncOracle: SyncOracle; + /// A handle to a `JustificationSyncLink`, allows hooking into the sync module to control the + /// justification sync process. + type JustificationSyncLink: JustificationSyncLink; + /// The type of future resolving to the proposer. type CreateProposer: Future> + Send + Unpin + 'static; @@ -178,6 +184,9 @@ pub trait SimpleSlotWorker { /// Returns a handle to a `SyncOracle`. fn sync_oracle(&mut self) -> &mut Self::SyncOracle; + /// Returns a handle to a `JustificationSyncLink`. + fn justification_sync_link(&mut self) -> &mut Self::JustificationSyncLink; + /// Returns a `Proposer` to author on top of the given block. fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer; @@ -392,27 +401,37 @@ pub trait SimpleSlotWorker { ); let header = block_import_params.post_header(); - if let Err(err) = block_import + match block_import .import_block(block_import_params, Default::default()) .await { - warn!( - target: logging_target, - "Error with block built on {:?}: {:?}", - parent_hash, - err, - ); + Ok(res) => { + res.handle_justification( + &header.hash(), + *header.number(), + self.justification_sync_link(), + ); + } + Err(err) => { + warn!( + target: logging_target, + "Error with block built on {:?}: {:?}", parent_hash, err, + ); - telemetry!( - telemetry; - CONSENSUS_WARN; - "slots.err_with_block_built_on"; - "hash" => ?parent_hash, - "err" => ?err, - ); + telemetry!( + telemetry; + CONSENSUS_WARN; + "slots.err_with_block_built_on"; + "hash" => ?parent_hash, + "err" => ?err, + ); + } } - Some(SlotResult { block: B::new(header, body), storage_proof }) + Some(SlotResult { + block: B::new(header, body), + storage_proof, + }) } } @@ -481,7 +500,7 @@ where /// /// Every time a new slot is triggered, `worker.on_slot` is called and the future it returns is /// polled until completion, unless we are major syncing. 
-pub async fn start_slot_worker( +pub async fn start_slot_worker( slot_duration: SlotDuration, client: C, mut worker: W, @@ -495,9 +514,9 @@ where W: SlotWorker, SO: SyncOracle + Send, T: SlotData + Clone, - CAW: CanAuthorWith + Send, CIDP: CreateInherentDataProviders + Send, CIDP::InherentDataProviders: InherentDataProviderExt + Send, + CAW: CanAuthorWith + Send, { let SlotDuration(slot_duration) = slot_duration; diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 6431250c96f3a..a3a490e097780 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -966,6 +966,11 @@ impl Protocol { self.sync.request_justification(&hash, number) } + /// Clear all pending justification requests. + pub fn clear_justification_requests(&mut self) { + self.sync.clear_justification_requests(); + } + /// Request syncing for the given block from given set of peers. /// Uses `protocol` to queue a new block download request and tries to dispatch all pending /// requests. diff --git a/client/network/src/protocol/sync.rs b/client/network/src/protocol/sync.rs index f1b744c89a995..7b7ac721b5b47 100644 --- a/client/network/src/protocol/sync.rs +++ b/client/network/src/protocol/sync.rs @@ -632,6 +632,11 @@ impl ChainSync { }) } + /// Clear all pending justification requests. + pub fn clear_justification_requests(&mut self) { + self.extra_justifications.reset(); + } + /// Request syncing for the given block from given set of peers. // The implementation is similar to on_block_announce with unknown parent hash. 
pub fn set_sync_fork_request( @@ -1117,7 +1122,7 @@ impl ChainSync { number, hash ); - self.extra_justifications.reset() + self.clear_justification_requests(); } if aux.needs_justification { diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 6351f03a393ed..666108363f640 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -976,6 +976,13 @@ impl NetworkService { .unbounded_send(ServiceToWorkerMsg::RequestJustification(*hash, number)); } + /// Clear all pending justification requests. + pub fn clear_justification_requests(&self) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::ClearJustificationRequests); + } + /// Are we in the process of downloading the chain? pub fn is_major_syncing(&self) -> bool { self.is_major_syncing.load(Ordering::Relaxed) @@ -1219,6 +1226,16 @@ impl<'a, B: BlockT + 'static, H: ExHashT> sp_consensus::SyncOracle } } +impl sp_consensus::JustificationSyncLink for NetworkService { + fn request_justification(&self, hash: &B::Hash, number: NumberFor) { + NetworkService::request_justification(self, hash, number); + } + + fn clear_justification_requests(&self) { + NetworkService::clear_justification_requests(self); + } +} + impl NetworkStateInfo for NetworkService where B: sp_runtime::traits::Block, @@ -1323,6 +1340,7 @@ enum ServiceToWorkerMsg { PropagateTransaction(H), PropagateTransactions, RequestJustification(B::Hash, NumberFor), + ClearJustificationRequests, AnnounceBlock(B::Hash, Option>), GetValue(record::Key), PutValue(record::Key, Vec), @@ -1444,6 +1462,8 @@ impl Future for NetworkWorker { this.network_service.behaviour_mut().user_protocol_mut().announce_block(hash, data), ServiceToWorkerMsg::RequestJustification(hash, number) => this.network_service.behaviour_mut().user_protocol_mut().request_justification(&hash, number), + ServiceToWorkerMsg::ClearJustificationRequests => + this.network_service.behaviour_mut().user_protocol_mut().clear_justification_requests(), 
ServiceToWorkerMsg::PropagateTransaction(hash) => this.tx_handler_controller.propagate_transaction(hash), ServiceToWorkerMsg::PropagateTransactions => diff --git a/primitives/consensus/common/src/block_import.rs b/primitives/consensus/common/src/block_import.rs index 6e4fb98865015..31c3eb74457c3 100644 --- a/primitives/consensus/common/src/block_import.rs +++ b/primitives/consensus/common/src/block_import.rs @@ -68,6 +68,30 @@ impl ImportResult { ImportResult::Imported(aux) } + + /// Handles any necessary request for justifications (or clearing of pending requests) based on + /// the outcome of this block import. + pub fn handle_justification( + &self, + hash: &B::Hash, + number: NumberFor, + justification_sync_link: &mut dyn JustificationSyncLink, + ) where + B: BlockT, + { + match self { + ImportResult::Imported(aux) => { + if aux.clear_justification_requests { + justification_sync_link.clear_justification_requests(); + } + + if aux.needs_justification { + justification_sync_link.request_justification(hash, number); + } + } + _ => {} + } + } } /// Block data origin. @@ -354,3 +378,32 @@ pub trait JustificationImport { justification: Justification, ) -> Result<(), Self::Error>; } + +/// Control the synchronization process of block justifications. +/// +/// When importing blocks different consensus engines might require that +/// additional finality data is provided (i.e. a justification for the block). +/// This trait abstracts the required methods to issue those requests +pub trait JustificationSyncLink: Send + Sync { + /// Request a justification for the given block. + fn request_justification(&self, hash: &B::Hash, number: NumberFor); + + /// Clear all pending justification requests. 
+ fn clear_justification_requests(&self); +} + +impl JustificationSyncLink for () { + fn request_justification(&self, _hash: &B::Hash, _number: NumberFor) {} + + fn clear_justification_requests(&self) {} +} + +impl> JustificationSyncLink for Arc { + fn request_justification(&self, hash: &B::Hash, number: NumberFor) { + L::request_justification(&*self, hash, number); + } + + fn clear_justification_requests(&self) { + L::clear_justification_requests(&*self); + } +} diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index 642b6b12e7d6f..37df7230fd62b 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -49,8 +49,8 @@ mod metrics; pub use self::error::Error; pub use block_import::{ - BlockImport, BlockOrigin, ForkChoiceStrategy, ImportedAux, BlockImportParams, BlockCheckParams, - ImportResult, JustificationImport, + BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, ForkChoiceStrategy, + ImportResult, ImportedAux, JustificationImport, JustificationSyncLink, }; pub use select_chain::SelectChain; pub use sp_state_machine::Backend as StateBackend; From 24a92c32680258275926021ae4da7db126ddf1d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Sun, 6 Jun 2021 09:07:29 +0100 Subject: [PATCH 08/61] arithmetic: fix PerThing pow (#9030) * arithmetic: add failing test for pow * arithmetic: fix PerThing::pow * Revert back to previous optimisations Co-authored-by: Gav Wood --- primitives/arithmetic/src/per_things.rs | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/primitives/arithmetic/src/per_things.rs b/primitives/arithmetic/src/per_things.rs index 29d5d2be73a1c..80d556486d563 100644 --- a/primitives/arithmetic/src/per_things.rs +++ b/primitives/arithmetic/src/per_things.rs @@ -639,20 +639,20 @@ macro_rules! 
implement_per_thing { impl Pow for $name { type Output = Self; - fn pow(self, exp: usize) -> Self::Output { + fn pow(mut self, exp: usize) -> Self::Output { if exp == 0 || self.is_one() { return Self::one() } + let mut result = self; let mut exp = exp - 1; while exp > 0 && !result.is_zero() { - if exp % 2 == 0 { - result = result.square(); - exp /= 2; - } else { + if exp % 2 != 0 { result = result * self; exp -= 1; } + self = self.square(); + exp /= 2; } result } @@ -1107,11 +1107,13 @@ macro_rules! implement_per_thing { $name::from_parts($max / 2).square(), ); - // x^3 - assert_eq!( - $name::from_parts($max / 2).saturating_pow(3), - $name::from_parts($max / 8), - ); + // x^2 .. x^16 + for n in 1..=16 { + assert_eq!( + $name::from_parts($max / 2).saturating_pow(n), + $name::from_parts(($max as u128 / 2u128.pow(n as u32)) as $type), + ); + } // 0^n == 0 assert_eq!( From 1085a9021134f39d527c1bc828e7904959b3cc1a Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 7 Jun 2021 11:06:38 +0200 Subject: [PATCH 09/61] Compact proof utilities in sp_trie. (#8574) * validation extension in sp_io * need paths * arc impl * missing host function in executor * io to pkdot * decode function. * encode primitive. * trailing tab * multiple patch * fix child trie logic * restore master versionning * bench compact proof size * trie-db 22.3 is needed * line width * split line * fixes for bench (additional root may not be needed as original issue was with empty proof). * revert compact from block size calculation. * New error type for compression. * Adding test (incomplete (failing)). Also lacking real proof checking (no good primitives in sp-trie crate). * There is currently no proof recording utility in sp_trie, removing test. * small test of child root in proof without a child proof. * remove empty test. * remove non compact proof size * Missing revert. * proof method to encode decode. 
--- client/db/src/bench.rs | 31 ++- primitives/state-machine/src/lib.rs | 57 +++++- primitives/trie/Cargo.toml | 2 +- primitives/trie/src/error.rs | 2 +- primitives/trie/src/lib.rs | 6 +- primitives/trie/src/storage_proof.rs | 56 ++++++ primitives/trie/src/trie_codec.rs | 259 +++++++++++++++++++++++++ utils/wasm-builder/src/wasm_project.rs | 3 +- 8 files changed, 407 insertions(+), 9 deletions(-) create mode 100644 primitives/trie/src/trie_codec.rs diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index ed53f52da3ce2..c198fb400408e 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -117,6 +117,7 @@ pub struct BenchmarkingState { read_write_tracker: RefCell, whitelist: RefCell>, proof_recorder: Option>, + proof_recorder_root: Cell, } impl BenchmarkingState { @@ -129,7 +130,7 @@ impl BenchmarkingState { let mut state = BenchmarkingState { state: RefCell::new(None), db: Cell::new(None), - root: Cell::new(root), + root: Cell::new(root.clone()), genesis: Default::default(), genesis_root: Default::default(), record: Default::default(), @@ -139,6 +140,7 @@ impl BenchmarkingState { read_write_tracker: Default::default(), whitelist: Default::default(), proof_recorder: record_proof.then(Default::default), + proof_recorder_root: Cell::new(root.clone()), }; state.add_whitelist_to_tracker(); @@ -166,7 +168,10 @@ impl BenchmarkingState { None => Arc::new(kvdb_memorydb::create(1)), }; self.db.set(Some(db.clone())); - self.proof_recorder.as_ref().map(|r| r.reset()); + if let Some(recorder) = &self.proof_recorder { + recorder.reset(); + self.proof_recorder_root.set(self.root.get()); + } let storage_db = Arc::new(StorageDb:: { db, proof_recorder: self.proof_recorder.clone(), @@ -516,7 +521,27 @@ impl StateBackend> for BenchmarkingState { } fn proof_size(&self) -> Option { - self.proof_recorder.as_ref().map(|recorder| recorder.estimate_encoded_size() as u32) + self.proof_recorder.as_ref().map(|recorder| { + let proof_size = recorder.estimate_encoded_size() 
as u32; + let proof = recorder.to_storage_proof(); + let proof_recorder_root = self.proof_recorder_root.get(); + if proof_recorder_root == Default::default() || proof_size == 1 { + // empty trie + proof_size + } else { + if let Some(size) = proof.encoded_compact_size::>(proof_recorder_root) { + size as u32 + } else { + panic!( + "proof rec root {:?}, root {:?}, genesis {:?}, rec_len {:?}", + self.proof_recorder_root.get(), + self.root.get(), + self.genesis_root, + proof_size, + ); + } + } + }) } } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 479184b4b9905..0508bfb780929 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1402,14 +1402,22 @@ mod tests { } } + fn test_compact(remote_proof: StorageProof, remote_root: &sp_core::H256) -> StorageProof { + let compact_remote_proof = remote_proof.into_compact_proof::( + remote_root.clone(), + ).unwrap(); + compact_remote_proof.to_storage_proof::(Some(remote_root)).unwrap().0 + } + #[test] fn prove_read_and_proof_check_works() { let child_info = ChildInfo::new_default(b"sub1"); let child_info = &child_info; // fetch read proof from 'remote' full node let remote_backend = trie_backend::tests::test_trie(); - let remote_root = remote_backend.storage_root(::std::iter::empty()).0; + let remote_root = remote_backend.storage_root(std::iter::empty()).0; let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); + let remote_proof = test_compact(remote_proof, &remote_root); // check proof locally let local_result1 = read_proof_check::( remote_root, @@ -1429,12 +1437,13 @@ mod tests { assert_eq!(local_result2, false); // on child trie let remote_backend = trie_backend::tests::test_trie(); - let remote_root = remote_backend.storage_root(::std::iter::empty()).0; + let remote_root = remote_backend.storage_root(std::iter::empty()).0; let remote_proof = prove_child_read( remote_backend, child_info, &[b"value3"], ).unwrap(); + let 
remote_proof = test_compact(remote_proof, &remote_root); let local_result1 = read_child_proof_check::( remote_root, remote_proof.clone(), @@ -1457,6 +1466,50 @@ mod tests { ); } + #[test] + fn compact_multiple_child_trie() { + // this root will be queried + let child_info1 = ChildInfo::new_default(b"sub1"); + // this root will not be include in proof + let child_info2 = ChildInfo::new_default(b"sub2"); + // this root will be include in proof + let child_info3 = ChildInfo::new_default(b"sub"); + let mut remote_backend = trie_backend::tests::test_trie(); + let (remote_root, transaction) = remote_backend.full_storage_root( + std::iter::empty(), + vec![ + (&child_info1, vec![ + (&b"key1"[..], Some(&b"val2"[..])), + (&b"key2"[..], Some(&b"val3"[..])), + ].into_iter()), + (&child_info2, vec![ + (&b"key3"[..], Some(&b"val4"[..])), + (&b"key4"[..], Some(&b"val5"[..])), + ].into_iter()), + (&child_info3, vec![ + (&b"key5"[..], Some(&b"val6"[..])), + (&b"key6"[..], Some(&b"val7"[..])), + ].into_iter()), + ].into_iter(), + ); + remote_backend.backend_storage_mut().consolidate(transaction); + remote_backend.essence.set_root(remote_root.clone()); + let remote_proof = prove_child_read( + remote_backend, + &child_info1, + &[b"key1"], + ).unwrap(); + let remote_proof = test_compact(remote_proof, &remote_root); + let local_result1 = read_child_proof_check::( + remote_root, + remote_proof.clone(), + &child_info1, + &[b"key1"], + ).unwrap(); + assert_eq!(local_result1.len(), 1); + assert_eq!(local_result1.get(&b"key1"[..]), Some(&Some(b"val2".to_vec()))); + } + #[test] fn child_storage_uuid() { diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 4396550a48a8f..9584ae678d409 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -21,7 +21,7 @@ harness = false codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } sp-std = { version = "3.0.0", default-features = false, path = "../std" } hash-db = { version = 
"0.15.2", default-features = false } -trie-db = { version = "0.22.2", default-features = false } +trie-db = { version = "0.22.3", default-features = false } trie-root = { version = "0.16.0", default-features = false } memory-db = { version = "0.26.0", default-features = false } sp-core = { version = "3.0.0", default-features = false, path = "../core" } diff --git a/primitives/trie/src/error.rs b/primitives/trie/src/error.rs index 8e1d9b974ffd5..bdaa49b1156f7 100644 --- a/primitives/trie/src/error.rs +++ b/primitives/trie/src/error.rs @@ -26,7 +26,7 @@ pub enum Error { /// Bad format. BadFormat, /// Decoding error. - Decode(codec::Error) + Decode(codec::Error), } impl From for Error { diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 572283f1c027e..89bef715ba99a 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -23,6 +23,7 @@ mod error; mod node_header; mod node_codec; mod storage_proof; +mod trie_codec; mod trie_stream; use sp_std::{boxed::Box, marker::PhantomData, vec::Vec, borrow::Borrow}; @@ -35,7 +36,7 @@ pub use error::Error; pub use trie_stream::TrieStream; /// The Substrate format implementation of `NodeCodec`. pub use node_codec::NodeCodec; -pub use storage_proof::StorageProof; +pub use storage_proof::{StorageProof, CompactProof}; /// Various re-exports from the `trie-db` crate. pub use trie_db::{ Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, nibble_ops, TrieDBIterator, @@ -45,6 +46,9 @@ pub use memory_db::KeyFunction; pub use memory_db::prefixed_key; /// Various re-exports from the `hash-db` crate. pub use hash_db::{HashDB as HashDBT, EMPTY_PREFIX}; +/// Trie codec reexport, mainly child trie support +/// for trie compact proof. 
+pub use trie_codec::{decode_compact, encode_compact, Error as CompactProofError}; #[derive(Default)] /// substrate trie layout diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index d8394a89de526..03668920509b8 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -31,6 +31,12 @@ pub struct StorageProof { trie_nodes: Vec>, } +/// Storage proof in compact form. +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] +pub struct CompactProof { + pub encoded_nodes: Vec>, +} + impl StorageProof { /// Constructs a storage proof from a subset of encoded trie nodes in a storage backend. pub fn new(trie_nodes: Vec>) -> Self { @@ -79,6 +85,56 @@ impl StorageProof { Self { trie_nodes } } + + /// Encode as a compact proof with default + /// trie layout. + pub fn into_compact_proof( + self, + root: H::Out, + ) -> Result>> { + crate::encode_compact::>(self, root) + } + + /// Returns the estimated encoded size of the compact proof. + /// + /// Runing this operation is a slow operation (build the whole compact proof) and should only be + /// in non sensitive path. + /// Return `None` on error. + pub fn encoded_compact_size(self, root: H::Out) -> Option { + let compact_proof = self.into_compact_proof::(root); + compact_proof.ok().map(|p| p.encoded_size()) + } + +} + +impl CompactProof { + /// Return an iterator on the compact encoded nodes. + pub fn iter_compact_encoded_nodes(&self) -> impl Iterator { + self.encoded_nodes.iter().map(Vec::as_slice) + } + + /// Decode to a full storage_proof. + /// + /// Method use a temporary `HashDB`, and `sp_trie::decode_compact` + /// is often better. 
+ pub fn to_storage_proof( + &self, + expected_root: Option<&H::Out>, + ) -> Result<(StorageProof, H::Out), crate::CompactProofError>> { + let mut db = crate::MemoryDB::::new(&[]); + let root = crate::decode_compact::, _, _>( + &mut db, + self.iter_compact_encoded_nodes(), + expected_root, + )?; + Ok((StorageProof::new(db.drain().into_iter().filter_map(|kv| + if (kv.1).1 > 0 { + Some((kv.1).0) + } else { + None + } + ).collect()), root)) + } } /// An iterator over trie nodes constructed from a storage proof. The nodes are not guaranteed to diff --git a/primitives/trie/src/trie_codec.rs b/primitives/trie/src/trie_codec.rs new file mode 100644 index 0000000000000..efe3223580f3f --- /dev/null +++ b/primitives/trie/src/trie_codec.rs @@ -0,0 +1,259 @@ +// This file is part of Substrate. + +// Copyright (C) 2021-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Compact proof support. +//! +//! This uses compact proof from trie crate and extends +//! it to substrate specific layout and child trie system. + +use crate::{ + EMPTY_PREFIX, HashDBT, TrieHash, TrieError, TrieConfiguration, + CompactProof, StorageProof, +}; +use sp_std::boxed::Box; +use sp_std::vec::Vec; +use trie_db::Trie; +#[cfg(feature="std")] +use std::fmt; +#[cfg(feature="std")] +use std::error::Error as StdError; + + +/// Error for trie node decoding. +pub enum Error { + /// Verification failed due to root mismatch. 
+ RootMismatch(TrieHash, TrieHash), + /// Missing nodes in proof. + IncompleteProof, + /// Compact node is not needed. + ExtraneousChildNode, + /// Child content with root not in proof. + ExtraneousChildProof(TrieHash), + /// Bad child trie root. + InvalidChildRoot(Vec, Vec), + /// Errors from trie crate. + TrieError(Box>), +} + +impl From>> for Error { + fn from(error: Box>) -> Self { + Error::TrieError(error) + } +} + +#[cfg(feature="std")] +impl StdError for Error { + fn description(&self) -> &str { + match self { + Error::InvalidChildRoot(..) => "Invalid child root error", + Error::TrieError(..) => "Trie db error", + Error::RootMismatch(..) => "Trie db error", + Error::IncompleteProof => "Incomplete proof", + Error::ExtraneousChildNode => "Extraneous child node", + Error::ExtraneousChildProof(..) => "Extraneous child proof", + } + } +} + +#[cfg(feature="std")] +impl fmt::Debug for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + ::fmt(&self, f) + } +} + +#[cfg(feature="std")] +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Error::InvalidChildRoot(k, v) => write!(f, "InvalidChildRoot at {:x?}: {:x?}", k, v), + Error::TrieError(e) => write!(f, "Trie error: {}", e), + Error::IncompleteProof => write!(f, "Incomplete proof"), + Error::ExtraneousChildNode => write!(f, "Child node content with no root in proof"), + Error::ExtraneousChildProof(root) => write!(f, "Proof of child trie {:x?} not in parent proof", root.as_ref()), + Error::RootMismatch(root, expected) => write!( + f, + "Verification error, root is {:x?}, expected: {:x?}", + root.as_ref(), + expected.as_ref(), + ), + } + } +} + +/// Decode a compact proof. +/// +/// Takes as input a destination `db` for decoded node and `encoded` +/// an iterator of compact encoded nodes. +/// +/// Child trie are decoded in order of child trie root present +/// in the top trie. 
+pub fn decode_compact<'a, L, DB, I>( + db: &mut DB, + encoded: I, + expected_root: Option<&TrieHash>, +) -> Result, Error> + where + L: TrieConfiguration, + DB: HashDBT + hash_db::HashDBRef, + I: IntoIterator, +{ + let mut nodes_iter = encoded.into_iter(); + let (top_root, _nb_used) = trie_db::decode_compact_from_iter::( + db, + &mut nodes_iter, + )?; + + // Only check root if expected root is passed as argument. + if let Some(expected_root) = expected_root { + if expected_root != &top_root { + return Err(Error::RootMismatch(top_root.clone(), expected_root.clone())); + } + } + + let mut child_tries = Vec::new(); + { + // fetch child trie roots + let trie = crate::TrieDB::::new(db, &top_root)?; + + let mut iter = trie.iter()?; + + let childtrie_roots = sp_core::storage::well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX; + if iter.seek(childtrie_roots).is_ok() { + loop { + match iter.next() { + Some(Ok((key, value))) if key.starts_with(childtrie_roots) => { + // we expect all default child trie root to be correctly encoded. + // see other child trie functions. + let mut root = TrieHash::::default(); + // still in a proof so prevent panic + if root.as_mut().len() != value.as_slice().len() { + return Err(Error::InvalidChildRoot(key, value)); + } + root.as_mut().copy_from_slice(value.as_ref()); + child_tries.push(root); + }, + // allow incomplete database error: we only + // require access to data in the proof. + Some(Err(error)) => match *error { + trie_db::TrieError::IncompleteDatabase(..) 
=> (), + e => return Err(Box::new(e).into()), + }, + _ => break, + } + } + } + } + + if !HashDBT::::contains(db, &top_root, EMPTY_PREFIX) { + return Err(Error::IncompleteProof); + } + + let mut previous_extracted_child_trie = None; + for child_root in child_tries.into_iter() { + if previous_extracted_child_trie.is_none() { + let (top_root, _) = trie_db::decode_compact_from_iter::( + db, + &mut nodes_iter, + )?; + previous_extracted_child_trie = Some(top_root); + } + + // we do not early exit on root mismatch but try the + // other read from proof (some child root may be + // in proof without actual child content). + if Some(child_root) == previous_extracted_child_trie { + previous_extracted_child_trie = None; + } + } + + if let Some(child_root) = previous_extracted_child_trie { + // A child root was read from proof but is not present + // in top trie. + return Err(Error::ExtraneousChildProof(child_root)); + } + + if nodes_iter.next().is_some() { + return Err(Error::ExtraneousChildNode); + } + + Ok(top_root) +} + +/// Encode a compact proof. +/// +/// Takes as input all full encoded node from the proof, and +/// the root. +/// Then parse all child trie root and compress main trie content first +/// then all child trie contents. +/// Child trie are ordered by the order of their roots in the top trie. 
+pub fn encode_compact( + proof: StorageProof, + root: TrieHash, +) -> Result> + where + L: TrieConfiguration, +{ + let mut child_tries = Vec::new(); + let partial_db = proof.into_memory_db(); + let mut compact_proof = { + let trie = crate::TrieDB::::new(&partial_db, &root)?; + + let mut iter = trie.iter()?; + + let childtrie_roots = sp_core::storage::well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX; + if iter.seek(childtrie_roots).is_ok() { + loop { + match iter.next() { + Some(Ok((key, value))) if key.starts_with(childtrie_roots) => { + let mut root = TrieHash::::default(); + if root.as_mut().len() != value.as_slice().len() { + // some child trie root in top trie are not an encoded hash. + return Err(Error::InvalidChildRoot(key.to_vec(), value.to_vec())); + } + root.as_mut().copy_from_slice(value.as_ref()); + child_tries.push(root); + }, + // allow incomplete database error: we only + // require access to data in the proof. + Some(Err(error)) => match *error { + trie_db::TrieError::IncompleteDatabase(..) => (), + e => return Err(Box::new(e).into()), + }, + _ => break, + } + } + } + + trie_db::encode_compact::(&trie)? + }; + + for child_root in child_tries { + if !HashDBT::::contains(&partial_db, &child_root, EMPTY_PREFIX) { + // child proof are allowed to be missing (unused root can be included + // due to trie structure modification). 
+ continue; + } + + let trie = crate::TrieDB::::new(&partial_db, &child_root)?; + let child_proof = trie_db::encode_compact::(&trie)?; + + compact_proof.extend(child_proof); + } + + Ok(CompactProof { encoded_nodes: compact_proof }) +} diff --git a/utils/wasm-builder/src/wasm_project.rs b/utils/wasm-builder/src/wasm_project.rs index 58161f53113fe..466c2145e6cee 100644 --- a/utils/wasm-builder/src/wasm_project.rs +++ b/utils/wasm-builder/src/wasm_project.rs @@ -232,7 +232,8 @@ fn create_project_cargo_toml( wasm_workspace_toml.insert("profile".into(), profile.into()); // Add patch section from the project root `Cargo.toml` - if let Some(mut patch) = workspace_toml.remove("patch").and_then(|p| p.try_into::().ok()) { + while let Some(mut patch) = workspace_toml.remove("patch") + .and_then(|p| p.try_into::
().ok()) { // Iterate over all patches and make the patch path absolute from the workspace root path. patch.iter_mut() .filter_map(|p| From 1fa8cf7cf9dbfe1b093b3e7e116dc3435c7f3f7b Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Mon, 7 Jun 2021 11:26:31 +0200 Subject: [PATCH 10/61] Don't inlucde nominaotrs that back no one in the snapshot. (#9017) --- frame/staking/src/lib.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 67726f69228f7..c8011faef1513 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -2518,8 +2518,10 @@ impl Module { .map_or(true, |spans| submitted_in >= spans.last_nonzero_slash()) }); - let vote_weight = weight_of(&nominator); - all_voters.push((nominator, vote_weight, targets)) + if !targets.is_empty() { + let vote_weight = weight_of(&nominator); + all_voters.push((nominator, vote_weight, targets)) + } } all_voters From 5d89967d7cc12d620bda9c9c042dbf7fcc4beb89 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Mon, 7 Jun 2021 15:00:03 +0200 Subject: [PATCH 11/61] Periodically call `Peerset::alloc_slots` on all sets (#9025) * Periodically call alloc_slots on all slots * Add test --- client/peerset/src/lib.rs | 55 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 54 insertions(+), 1 deletion(-) diff --git a/client/peerset/src/lib.rs b/client/peerset/src/lib.rs index eefab81b851da..19260afccb802 100644 --- a/client/peerset/src/lib.rs +++ b/client/peerset/src/lib.rs @@ -39,7 +39,7 @@ use futures::prelude::*; use log::{debug, error, trace}; use serde_json::json; use std::{collections::HashMap, pin::Pin, task::{Context, Poll}, time::Duration}; -use wasm_timer::Instant; +use wasm_timer::{Delay, Instant}; use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; pub use libp2p::PeerId; @@ -252,6 +252,9 @@ pub struct Peerset { created: Instant, /// Last time when we 
updated the reputations of connected nodes. latest_time_update: Instant, + /// Next time to do a periodic call to `alloc_slots` with all sets. This is done once per + /// second, to match the period of the reputation updates. + next_periodic_alloc_slots: Delay, } impl Peerset { @@ -279,6 +282,7 @@ impl Peerset { message_queue: VecDeque::new(), created: now, latest_time_update: now, + next_periodic_alloc_slots: Delay::new(Duration::new(0, 0)), } }; @@ -699,6 +703,14 @@ impl Stream for Peerset { return Poll::Ready(Some(message)); } + if let Poll::Ready(_) = Future::poll(Pin::new(&mut self.next_periodic_alloc_slots), cx) { + self.next_periodic_alloc_slots = Delay::new(Duration::new(1, 0)); + + for set_index in 0..self.data.num_sets() { + self.alloc_slots(SetId(set_index)); + } + } + let action = match Stream::poll_next(Pin::new(&mut self.rx), cx) { Poll::Pending => return Poll::Pending, Poll::Ready(Some(event)) => event, @@ -907,4 +919,45 @@ mod tests { futures::executor::block_on(fut); } + + #[test] + fn test_relloc_after_banned() { + let (mut peerset, handle) = Peerset::from_config(PeersetConfig { + sets: vec![SetConfig { + in_peers: 25, + out_peers: 25, + bootnodes: vec![], + reserved_nodes: Default::default(), + reserved_only: false, + }], + }); + + // We ban a node by setting its reputation under the threshold. + let peer_id = PeerId::random(); + handle.report_peer(peer_id.clone(), ReputationChange::new(BANNED_THRESHOLD - 1, "")); + + let fut = futures::future::poll_fn(move |cx| { + // We need one polling for the message to be processed. + assert_eq!(Stream::poll_next(Pin::new(&mut peerset), cx), Poll::Pending); + + // Check that an incoming connection from that node gets refused. + // This is already tested in other tests, but it is done again here because it doesn't + // hurt. 
+ peerset.incoming(SetId::from(0), peer_id.clone(), IncomingIndex(1)); + if let Poll::Ready(msg) = Stream::poll_next(Pin::new(&mut peerset), cx) { + assert_eq!(msg.unwrap(), Message::Reject(IncomingIndex(1))); + } else { + panic!() + } + + // Wait for the peerset to change its mind and actually connect to it. + while let Poll::Ready(msg) = Stream::poll_next(Pin::new(&mut peerset), cx) { + assert_eq!(msg.unwrap(), Message::Connect { set_id: SetId::from(0), peer_id }); + } + + Poll::Ready(()) + }); + + futures::executor::block_on(fut); + } } From fa26ce6b4b59710cb402d76b9c4577c93d2f65d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Mon, 7 Jun 2021 19:40:23 +0200 Subject: [PATCH 12/61] contracts: Add new `seal_call` that offers new features (#8909) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add new `seal_call` that offers new features * Fix doc typo Co-authored-by: Michael Müller * Fix doc typos Co-authored-by: Michael Müller * Fix comment on assert * Update CHANGELOG.md Co-authored-by: Michael Müller --- Cargo.lock | 1 + frame/contracts/CHANGELOG.md | 3 + frame/contracts/Cargo.toml | 1 + frame/contracts/src/exec.rs | 162 +++++++++++++++++---- frame/contracts/src/lib.rs | 13 +- frame/contracts/src/wasm/mod.rs | 214 ++++++++++++++++++++++++++-- frame/contracts/src/wasm/runtime.rs | 180 +++++++++++++++++++---- 7 files changed, 503 insertions(+), 71 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 62056dd99b2e0..8a25ba6c7a480 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4838,6 +4838,7 @@ name = "pallet-contracts" version = "3.0.0" dependencies = [ "assert_matches", + "bitflags", "frame-benchmarking", "frame-support", "frame-system", diff --git a/frame/contracts/CHANGELOG.md b/frame/contracts/CHANGELOG.md index 76fc09ad17355..dd679f432d314 100644 --- a/frame/contracts/CHANGELOG.md +++ b/frame/contracts/CHANGELOG.md @@ -20,6 +20,9 @@ In other words: Upgrading this pallet will not break 
pre-existing contracts. ### Added +- New **unstable** version of `seal_call` that offers more features. +[#8909](https://github.com/paritytech/substrate/pull/8909) + - New **unstable** `seal_rent_params` and `seal_rent_status` contract callable function. [#8231](https://github.com/paritytech/substrate/pull/8231) [#8780](https://github.com/paritytech/substrate/pull/8780) diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index 71a45a9dfa6b4..9d344fb6866d7 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -13,6 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +bitflags = "1.0" codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } log = { version = "0.4", default-features = false } pwasm-utils = { version = "0.18", default-features = false } diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index d5b489d8912e3..f3a981347c981 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -167,6 +167,7 @@ pub trait Ext: sealing::Sealed { to: AccountIdOf, value: BalanceOf, input_data: Vec, + allows_reentry: bool, ) -> Result<(ExecReturnValue, u32), (ExecError, u32)>; /// Instantiate a contract from the given code. @@ -457,6 +458,8 @@ pub struct Frame { entry_point: ExportedFunction, /// The gas meter capped to the supplied gas limit. nested_meter: GasMeter, + /// If `false` the contract enabled its defense against reentrance attacks. + allows_reentry: bool, } /// Parameter passed in when creating a new `Frame`. @@ -731,6 +734,7 @@ where entry_point, nested_meter: gas_meter.nested(gas_limit) .map_err(|e| (e.into(), executable.code_len()))?, + allows_reentry: true, }; Ok((frame, executable)) @@ -1014,6 +1018,11 @@ where self.frames().skip(1).any(|f| &f.account_id == account_id) } + /// Returns whether the specified contract allows to be reentered right now. 
+ fn allows_reentry(&self, id: &AccountIdOf) -> bool { + !self.frames().any(|f| &f.account_id == id && !f.allows_reentry) + } + /// Increments the cached account id and returns the value to be used for the trie_id. fn next_trie_seed(&mut self) -> u64 { let next = if let Some(current) = self.account_counter { @@ -1045,25 +1054,44 @@ where to: T::AccountId, value: BalanceOf, input_data: Vec, + allows_reentry: bool, ) -> Result<(ExecReturnValue, u32), (ExecError, u32)> { - // We ignore instantiate frames in our search for a cached contract. - // Otherwise it would be possible to recursively call a contract from its own - // constructor: We disallow calling not fully constructed contracts. - let cached_info = self - .frames() - .find(|f| f.entry_point == ExportedFunction::Call && f.account_id == to) - .and_then(|f| { - match &f.contract_info { - CachedContract::Cached(contract) => Some(contract.clone()), - _ => None, - } - }); - let executable = self.push_frame( - FrameArgs::Call{dest: to, cached_info}, - value, - gas_limit - )?; - self.run(executable, input_data) + // Before pushing the new frame: Protect the caller contract against reentrancy attacks. + // It is important to do this before calling `allows_reentry` so that a direct recursion + // is caught by it. + self.top_frame_mut().allows_reentry = allows_reentry; + + let try_call = || { + if !self.allows_reentry(&to) { + return Err((>::ReentranceDenied.into(), 0)); + } + // We ignore instantiate frames in our search for a cached contract. + // Otherwise it would be possible to recursively call a contract from its own + // constructor: We disallow calling not fully constructed contracts. 
+ let cached_info = self + .frames() + .find(|f| f.entry_point == ExportedFunction::Call && f.account_id == to) + .and_then(|f| { + match &f.contract_info { + CachedContract::Cached(contract) => Some(contract.clone()), + _ => None, + } + }); + let executable = self.push_frame( + FrameArgs::Call{dest: to, cached_info}, + value, + gas_limit + )?; + self.run(executable, input_data) + }; + + // We need to make sure to reset `allows_reentry` even on failure. + let result = try_call(); + + // Protection is on a per call basis. + self.top_frame_mut().allows_reentry = true; + + result } fn instantiate( @@ -1097,7 +1125,7 @@ where beneficiary: &AccountIdOf, ) -> Result { if self.is_recursive() { - return Err((Error::::ReentranceDenied.into(), 0)); + return Err((Error::::TerminatedWhileReentrant.into(), 0)); } let frame = self.top_frame_mut(); let info = frame.terminate(); @@ -1125,7 +1153,7 @@ where delta: Vec, ) -> Result<(u32, u32), (DispatchError, u32, u32)> { if self.is_recursive() { - return Err((Error::::ReentranceDenied.into(), 0, 0)); + return Err((Error::::TerminatedWhileReentrant.into(), 0, 0)); } let origin_contract = self.top_frame_mut().contract_info().clone(); let result = Rent::::restore_to( @@ -1308,12 +1336,14 @@ mod tests { exec::ExportedFunction::*, Error, Weight, }; + use codec::{Encode, Decode}; use sp_core::Bytes; use sp_runtime::DispatchError; use assert_matches::assert_matches; use std::{cell::RefCell, collections::HashMap, rc::Rc}; use pretty_assertions::{assert_eq, assert_ne}; use pallet_contracts_primitives::ReturnFlags; + use frame_support::{assert_ok, assert_err}; type MockStack<'a> = Stack<'a, Test, MockExecutable>; @@ -1731,7 +1761,7 @@ mod tests { let value = Default::default(); let recurse_ch = MockLoader::insert(Call, |ctx, _| { // Try to call into yourself. 
- let r = ctx.ext.call(0, BOB, 0, vec![]); + let r = ctx.ext.call(0, BOB, 0, vec![], true); REACHED_BOTTOM.with(|reached_bottom| { let mut reached_bottom = reached_bottom.borrow_mut(); @@ -1789,7 +1819,7 @@ mod tests { // Call into CHARLIE contract. assert_matches!( - ctx.ext.call(0, CHARLIE, 0, vec![]), + ctx.ext.call(0, CHARLIE, 0, vec![], true), Ok(_) ); exec_success() @@ -1832,7 +1862,7 @@ mod tests { // Call into charlie contract. assert_matches!( - ctx.ext.call(0, CHARLIE, 0, vec![]), + ctx.ext.call(0, CHARLIE, 0, vec![], true), Ok(_) ); exec_success() @@ -2263,7 +2293,7 @@ mod tests { assert_ne!(original_allowance, changed_allowance); ctx.ext.set_rent_allowance(changed_allowance); assert_eq!( - ctx.ext.call(0, CHARLIE, 0, vec![]).map(|v| v.0).map_err(|e| e.0), + ctx.ext.call(0, CHARLIE, 0, vec![], true).map(|v| v.0).map_err(|e| e.0), exec_trapped() ); assert_eq!(ctx.ext.rent_allowance(), changed_allowance); @@ -2272,7 +2302,7 @@ mod tests { exec_success() }); let code_charlie = MockLoader::insert(Call, |ctx, _| { - assert!(ctx.ext.call(0, BOB, 0, vec![99]).is_ok()); + assert!(ctx.ext.call(0, BOB, 0, vec![99], true).is_ok()); exec_trapped() }); @@ -2299,7 +2329,7 @@ mod tests { fn recursive_call_during_constructor_fails() { let code = MockLoader::insert(Constructor, |ctx, _| { assert_matches!( - ctx.ext.call(0, ctx.ext.address().clone(), 0, vec![]), + ctx.ext.call(0, ctx.ext.address().clone(), 0, vec![], true), Err((ExecError{error, ..}, _)) if error == >::ContractNotFound.into() ); exec_success() @@ -2390,4 +2420,84 @@ mod tests { assert_eq!(&String::from_utf8(debug_buffer).unwrap(), "This is a testMore text"); } + + #[test] + fn call_reentry_direct_recursion() { + // call the contract passed as input with disabled reentry + let code_bob = MockLoader::insert(Call, |ctx, _| { + let dest = Decode::decode(&mut ctx.input_data.as_ref()).unwrap(); + ctx.ext.call(0, dest, 0, vec![], false).map(|v| v.0).map_err(|e| e.0) + }); + + let code_charlie = 
MockLoader::insert(Call, |_, _| { + exec_success() + }); + + ExtBuilder::default().build().execute_with(|| { + let schedule = ::Schedule::get(); + place_contract(&BOB, code_bob); + place_contract(&CHARLIE, code_charlie); + + // Calling another contract should succeed + assert_ok!(MockStack::run_call( + ALICE, + BOB, + &mut GasMeter::::new(GAS_LIMIT), + &schedule, + 0, + CHARLIE.encode(), + None, + )); + + // Calling into oneself fails + assert_err!( + MockStack::run_call( + ALICE, + BOB, + &mut GasMeter::::new(GAS_LIMIT), + &schedule, + 0, + BOB.encode(), + None, + ).map_err(|e| e.0.error), + >::ReentranceDenied, + ); + }); + } + + #[test] + fn call_deny_reentry() { + let code_bob = MockLoader::insert(Call, |ctx, _| { + if ctx.input_data[0] == 0 { + ctx.ext.call(0, CHARLIE, 0, vec![], false).map(|v| v.0).map_err(|e| e.0) + } else { + exec_success() + } + }); + + // call BOB with input set to '1' + let code_charlie = MockLoader::insert(Call, |ctx, _| { + ctx.ext.call(0, BOB, 0, vec![1], true).map(|v| v.0).map_err(|e| e.0) + }); + + ExtBuilder::default().build().execute_with(|| { + let schedule = ::Schedule::get(); + place_contract(&BOB, code_bob); + place_contract(&CHARLIE, code_charlie); + + // BOB -> CHARLIE -> BOB fails as BOB denies reentry. + assert_err!( + MockStack::run_call( + ALICE, + BOB, + &mut GasMeter::::new(GAS_LIMIT), + &schedule, + 0, + vec![0], + None, + ).map_err(|e| e.0.error), + >::ReentranceDenied, + ); + }); + } } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index fb4239adb24c3..f7dec843a7f7c 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -562,12 +562,11 @@ pub mod pallet { ContractTrapped, /// The size defined in `T::MaxValueSize` was exceeded. ValueTooLarge, - /// The action performed is not allowed while the contract performing it is already - /// on the call stack. Those actions are contract self destruction and restoration - /// of a tombstone. 
- ReentranceDenied, - /// `seal_input` was called twice from the same contract execution context. - InputAlreadyRead, + /// Termination of a contract is not allowed while the contract is already + /// on the call stack. Can be triggered by `seal_terminate` or `seal_restore_to. + TerminatedWhileReentrant, + /// `seal_call` forwarded this contracts input. It therefore is no longer available. + InputForwarded, /// The subject passed to `seal_random` exceeds the limit. RandomSubjectTooLong, /// The amount of topics passed to `seal_deposit_events` exceeds the limit. @@ -602,6 +601,8 @@ pub mod pallet { TerminatedInConstructor, /// The debug message specified to `seal_debug_message` does contain invalid UTF-8. DebugMessageInvalidUTF8, + /// A call tried to invoke a contract that is flagged as non-reentrant. + ReentranceDenied, } /// A mapping from an original code hash to the original code, untouched by instrumentation. diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index ed603732f6c02..5f9936c68dfbe 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -289,7 +289,14 @@ mod tests { struct TransferEntry { to: AccountIdOf, value: u64, + } + + #[derive(Debug, PartialEq, Eq)] + struct CallEntry { + to: AccountIdOf, + value: u64, data: Vec, + allows_reentry: bool, } pub struct MockExt { @@ -297,6 +304,7 @@ mod tests { rent_allowance: u64, instantiates: Vec, terminations: Vec, + calls: Vec, transfers: Vec, restores: Vec, // (topics, data) @@ -307,6 +315,11 @@ mod tests { debug_buffer: Vec, } + /// The call is mocked and just returns this hardcoded value. 
+ fn call_return_data() -> Bytes { + Bytes(vec![0xDE, 0xAD, 0xBE, 0xEF]) + } + impl Default for MockExt { fn default() -> Self { Self { @@ -314,6 +327,7 @@ mod tests { rent_allowance: Default::default(), instantiates: Default::default(), terminations: Default::default(), + calls: Default::default(), transfers: Default::default(), restores: Default::default(), events: Default::default(), @@ -334,13 +348,15 @@ mod tests { to: AccountIdOf, value: u64, data: Vec, + allows_reentry: bool, ) -> Result<(ExecReturnValue, u32), (ExecError, u32)> { - self.transfers.push(TransferEntry { + self.calls.push(CallEntry { to, value, - data: data, + data, + allows_reentry, }); - Ok((ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }, 0)) + Ok((ExecReturnValue { flags: ReturnFlags::empty(), data: call_return_data() }, 0)) } fn instantiate( &mut self, @@ -374,7 +390,6 @@ mod tests { self.transfers.push(TransferEntry { to: to.clone(), value, - data: Vec::new(), }); Ok(()) } @@ -526,7 +541,6 @@ mod tests { &[TransferEntry { to: ALICE, value: 153, - data: Vec::new(), }] ); } @@ -587,11 +601,192 @@ mod tests { )); assert_eq!( - &mock_ext.transfers, - &[TransferEntry { + &mock_ext.calls, + &[CallEntry { to: ALICE, value: 6, data: vec![1, 2, 3, 4], + allows_reentry: true, + }] + ); + } + + #[test] + #[cfg(feature = "unstable-interface")] + fn contract_call_forward_input() { + const CODE: &str = r#" +(module + (import "__unstable__" "seal_call" (func $seal_call (param i32 i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "env" "memory" (memory 1 1)) + (func (export "call") + (drop + (call $seal_call + (i32.const 1) ;; Set FORWARD_INPUT bit + (i32.const 4) ;; Pointer to "callee" address. + (i32.const 32) ;; Length of "callee" address. + (i64.const 0) ;; How much gas to devote for the execution. 0 = all. 
+ (i32.const 36) ;; Pointer to the buffer with value to transfer + (i32.const 8) ;; Length of the buffer with value to transfer. + (i32.const 44) ;; Pointer to input data buffer address + (i32.const 4) ;; Length of input data buffer + (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this case + ) + ) + + ;; triggers a trap because we already forwarded the input + (call $seal_input (i32.const 1) (i32.const 44)) + ) + + (func (export "deploy")) + + ;; Destination AccountId (ALICE) + (data (i32.const 4) + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + ) + + ;; Amount of value to transfer. + ;; Represented by u64 (8 bytes long) in little endian. + (data (i32.const 36) "\2A\00\00\00\00\00\00\00") + + ;; The input is ignored because we forward our own input + (data (i32.const 44) "\01\02\03\04") +) +"#; + let mut mock_ext = MockExt::default(); + let input = vec![0xff, 0x2a, 0x99, 0x88]; + frame_support::assert_err!( + execute(CODE, input.clone(), &mut mock_ext), + >::InputForwarded, + ); + + assert_eq!( + &mock_ext.calls, + &[CallEntry { + to: ALICE, + value: 0x2a, + data: input, + allows_reentry: false, + }] + ); + } + + #[test] + #[cfg(feature = "unstable-interface")] + fn contract_call_clone_input() { + const CODE: &str = r#" +(module + (import "__unstable__" "seal_call" (func $seal_call (param i32 i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + (func (export "call") + (drop + (call $seal_call + (i32.const 11) ;; Set FORWARD_INPUT | CLONE_INPUT | ALLOW_REENTRY bits + (i32.const 4) ;; Pointer to "callee" address. + (i32.const 32) ;; Length of "callee" address. + (i64.const 0) ;; How much gas to devote for the execution. 0 = all. 
+ (i32.const 36) ;; Pointer to the buffer with value to transfer + (i32.const 8) ;; Length of the buffer with value to transfer. + (i32.const 44) ;; Pointer to input data buffer address + (i32.const 4) ;; Length of input data buffer + (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this case + ) + ) + + ;; works because the input was cloned + (call $seal_input (i32.const 0) (i32.const 44)) + + ;; return the input to caller for inspection + (call $seal_return (i32.const 0) (i32.const 0) (i32.load (i32.const 44))) + ) + + (func (export "deploy")) + + ;; Destination AccountId (ALICE) + (data (i32.const 4) + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + ) + + ;; Amount of value to transfer. + ;; Represented by u64 (8 bytes long) in little endian. + (data (i32.const 36) "\2A\00\00\00\00\00\00\00") + + ;; The input is ignored because we forward our own input + (data (i32.const 44) "\01\02\03\04") +) +"#; + let mut mock_ext = MockExt::default(); + let input = vec![0xff, 0x2a, 0x99, 0x88]; + let result = execute(CODE, input.clone(), &mut mock_ext).unwrap(); + assert_eq!(result.data.0, input); + assert_eq!( + &mock_ext.calls, + &[CallEntry { + to: ALICE, + value: 0x2a, + data: input, + allows_reentry: true, + }] + ); + } + + #[test] + #[cfg(feature = "unstable-interface")] + fn contract_call_tail_call() { + const CODE: &str = r#" +(module + (import "__unstable__" "seal_call" (func $seal_call (param i32 i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) + (import "env" "memory" (memory 1 1)) + (func (export "call") + (drop + (call $seal_call + (i32.const 5) ;; Set FORWARD_INPUT | TAIL_CALL bit + (i32.const 4) ;; Pointer to "callee" address. + (i32.const 32) ;; Length of "callee" address. + (i64.const 0) ;; How much gas to devote for the execution. 0 = all. 
+ (i32.const 36) ;; Pointer to the buffer with value to transfer + (i32.const 8) ;; Length of the buffer with value to transfer. + (i32.const 0) ;; Pointer to input data buffer address + (i32.const 0) ;; Length of input data buffer + (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this case + ) + ) + + ;; a tail call never returns + (unreachable) + ) + + (func (export "deploy")) + + ;; Destination AccountId (ALICE) + (data (i32.const 4) + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + ) + + ;; Amount of value to transfer. + ;; Represented by u64 (8 bytes long) in little endian. + (data (i32.const 36) "\2A\00\00\00\00\00\00\00") +) +"#; + let mut mock_ext = MockExt::default(); + let input = vec![0xff, 0x2a, 0x99, 0x88]; + let result = execute(CODE, input.clone(), &mut mock_ext).unwrap(); + assert_eq!(result.data, call_return_data()); + assert_eq!( + &mock_ext.calls, + &[CallEntry { + to: ALICE, + value: 0x2a, + data: input, + allows_reentry: false, }] ); } @@ -772,11 +967,12 @@ mod tests { )); assert_eq!( - &mock_ext.transfers, - &[TransferEntry { + &mock_ext.calls, + &[CallEntry { to: ALICE, value: 6, data: vec![1, 2, 3, 4], + allows_reentry: true, }] ); } diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 99dcab17cf12d..7ca6dfed15819 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -24,6 +24,7 @@ use crate::{ wasm::env_def::ConvertibleToWasm, schedule::HostFnWeights, }; +use bitflags::bitflags; use pwasm_utils::parity_wasm::elements::ValueType; use frame_support::{dispatch::DispatchError, ensure, traits::Get, weights::Weight}; use sp_std::prelude::*; @@ -318,6 +319,47 @@ where } } +bitflags! { + /// Flags used to change the behaviour of `seal_call`. + struct CallFlags: u32 { + /// Forward the input of current function to the callee. 
+ /// + /// Supplied input pointers are ignored when set. + /// + /// # Note + /// + /// A forwarding call will consume the current contracts input. Any attempt to + /// access the input after this call returns will lead to [`Error::InputForwarded`]. + /// It does not matter if this is due to calling `seal_input` or trying another + /// forwarding call. Consider using [`Self::CLONE_INPUT`] in order to preserve + /// the input. + const FORWARD_INPUT = 0b0000_0001; + /// Identical to [`Self::FORWARD_INPUT`] but without consuming the input. + /// + /// This adds some additional weight costs to the call. + /// + /// # Note + /// + /// This implies [`Self::FORWARD_INPUT`] and takes precedence when both are set. + const CLONE_INPUT = 0b0000_0010; + /// Do not return from the call but rather return the result of the callee to the + /// callers caller. + /// + /// # Note + /// + /// This makes the current contract completely transparent to its caller by replacing + /// this contracts potential output by the callee ones. Any code after `seal_call` + /// can be safely considered unreachable. + const TAIL_CALL = 0b0000_0100; + /// Allow the callee to reenter into the current contract. + /// + /// Without this flag any reentrancy into the current contract that originates from + /// the callee (or any of its callees) is denied. This includes the first callee: + /// You cannot call into yourself with this flag set. + const ALLOW_REENTRY = 0b0000_1000; + } +} + /// This is only appropriate when writing out data of constant size that does not depend on user /// input. In this case the costs for this copy was already charged as part of the token at /// the beginning of the API entry point. @@ -402,8 +444,7 @@ where // // Because panics are really undesirable in the runtime code, we treat this as // a trap for now. Eventually, we might want to revisit this. 
- Err(sp_sandbox::Error::Module) => - Err("validation error")?, + Err(sp_sandbox::Error::Module) => Err("validation error")?, // Any other kind of a trap should result in a failure. Err(sp_sandbox::Error::Execution) | Err(sp_sandbox::Error::OutOfBounds) => Err(Error::::ContractTrapped)? @@ -629,6 +670,65 @@ where (err, _) => Self::err_into_return_code(err) } } + + fn call( + &mut self, + flags: CallFlags, + callee_ptr: u32, + callee_len: u32, + gas: u64, + value_ptr: u32, + value_len: u32, + input_data_ptr: u32, + input_data_len: u32, + output_ptr: u32, + output_len_ptr: u32 + ) -> Result { + self.charge_gas(RuntimeCosts::CallBase(input_data_len))?; + let callee: <::T as frame_system::Config>::AccountId = + self.read_sandbox_memory_as(callee_ptr, callee_len)?; + let value: BalanceOf<::T> = self.read_sandbox_memory_as(value_ptr, value_len)?; + let input_data = if flags.contains(CallFlags::CLONE_INPUT) { + self.input_data.as_ref().ok_or_else(|| Error::::InputForwarded)?.clone() + } else if flags.contains(CallFlags::FORWARD_INPUT) { + self.input_data.take().ok_or_else(|| Error::::InputForwarded)? + } else { + self.read_sandbox_memory(input_data_ptr, input_data_len)? + }; + if value > 0u32.into() { + self.charge_gas(RuntimeCosts::CallSurchargeTransfer)?; + } + let charged = self.charge_gas( + RuntimeCosts::CallSurchargeCodeSize(::Schedule::get().limits.code_len) + )?; + let ext = &mut self.ext; + let call_outcome = ext.call( + gas, callee, value, input_data, flags.contains(CallFlags::ALLOW_REENTRY), + ); + let code_len = match &call_outcome { + Ok((_, len)) => len, + Err((_, len)) => len, + }; + self.adjust_gas(charged, RuntimeCosts::CallSurchargeCodeSize(*code_len)); + + // `TAIL_CALL` only matters on an `OK` result. Otherwise the call stack comes to + // a halt anyways without anymore code being executed. 
+ if flags.contains(CallFlags::TAIL_CALL) { + if let Ok((return_value, _)) = call_outcome { + return Err(TrapReason::Return(ReturnData { + flags: return_value.flags.bits(), + data: return_value.data.0, + })); + } + } + + if let Ok((output, _)) = &call_outcome { + self.write_sandbox_output(output_ptr, output_len_ptr, &output.data, true, |len| { + Some(RuntimeCosts::CallCopyOut(len)) + })?; + } + Ok(Runtime::::exec_into_return_code(call_outcome.map(|r| r.0).map_err(|r| r.0))?) + } } // *********************************************************** @@ -758,6 +858,36 @@ define_env!(Env, , } }, + // Make a call to another contract. + // + // This is equivalent to calling the newer version of this function with + // `flags` set to `ALLOW_REENTRY`. See the newer version for documentation. + [seal0] seal_call( + ctx, + callee_ptr: u32, + callee_len: u32, + gas: u64, + value_ptr: u32, + value_len: u32, + input_data_ptr: u32, + input_data_len: u32, + output_ptr: u32, + output_len_ptr: u32 + ) -> ReturnCode => { + ctx.call( + CallFlags::ALLOW_REENTRY, + callee_ptr, + callee_len, + gas, + value_ptr, + value_len, + input_data_ptr, + input_data_len, + output_ptr, + output_len_ptr, + ) + }, + // Make a call to another contract. // // The callees output buffer is copied to `output_ptr` and its length to `output_len_ptr`. @@ -766,6 +896,7 @@ define_env!(Env, , // // # Parameters // + // - flags: See [`CallFlags`] for a documenation of the supported flags. // - callee_ptr: a pointer to the address of the callee contract. // Should be decodable as an `T::AccountId`. Traps otherwise. // - callee_len: length of the address buffer. 
@@ -789,8 +920,9 @@ define_env!(Env, , // `ReturnCode::BelowSubsistenceThreshold` // `ReturnCode::TransferFailed` // `ReturnCode::NotCallable` - [seal0] seal_call( + [__unstable__] seal_call( ctx, + flags: u32, callee_ptr: u32, callee_len: u32, gas: u64, @@ -801,30 +933,18 @@ define_env!(Env, , output_ptr: u32, output_len_ptr: u32 ) -> ReturnCode => { - ctx.charge_gas(RuntimeCosts::CallBase(input_data_len))?; - let callee: <::T as frame_system::Config>::AccountId = - ctx.read_sandbox_memory_as(callee_ptr, callee_len)?; - let value: BalanceOf<::T> = ctx.read_sandbox_memory_as(value_ptr, value_len)?; - let input_data = ctx.read_sandbox_memory(input_data_ptr, input_data_len)?; - if value > 0u32.into() { - ctx.charge_gas(RuntimeCosts::CallSurchargeTransfer)?; - } - let charged = ctx.charge_gas( - RuntimeCosts::CallSurchargeCodeSize(::Schedule::get().limits.code_len) - )?; - let ext = &mut ctx.ext; - let call_outcome = ext.call(gas, callee, value, input_data); - let code_len = match &call_outcome { - Ok((_, len)) => len, - Err((_, len)) => len, - }; - ctx.adjust_gas(charged, RuntimeCosts::CallSurchargeCodeSize(*code_len)); - if let Ok((output, _)) = &call_outcome { - ctx.write_sandbox_output(output_ptr, output_len_ptr, &output.data, true, |len| { - Some(RuntimeCosts::CallCopyOut(len)) - })?; - } - Ok(Runtime::::exec_into_return_code(call_outcome.map(|r| r.0).map_err(|r| r.0))?) + ctx.call( + CallFlags::from_bits(flags).ok_or_else(|| "used rerved bit in CallFlags")?, + callee_ptr, + callee_len, + gas, + value_ptr, + value_len, + input_data_ptr, + input_data_len, + output_ptr, + output_len_ptr, + ) }, // Instantiate a contract with the specified code hash. 
@@ -945,7 +1065,6 @@ define_env!(Env, , ctx.charge_gas(RuntimeCosts::Terminate)?; let beneficiary: <::T as frame_system::Config>::AccountId = ctx.read_sandbox_memory_as(beneficiary_ptr, beneficiary_len)?; - let charged = ctx.charge_gas( RuntimeCosts::TerminateSurchargeCodeSize( ::Schedule::get().limits.code_len @@ -969,16 +1088,17 @@ define_env!(Env, , // // # Note // - // This function can only be called once. Calling it multiple times will trigger a trap. + // This function traps if the input was previously forwarded by a `seal_call`. [seal0] seal_input(ctx, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeCosts::InputBase)?; if let Some(input) = ctx.input_data.take() { ctx.write_sandbox_output(out_ptr, out_len_ptr, &input, false, |len| { Some(RuntimeCosts::InputCopyOut(len)) })?; + ctx.input_data = Some(input); Ok(()) } else { - Err(Error::::InputAlreadyRead.into()) + Err(Error::::InputForwarded.into()) } }, From d0891796ed48acfad7445b884c8d035dc4a2487a Mon Sep 17 00:00:00 2001 From: Xiliang Chen Date: Tue, 8 Jun 2021 20:59:19 +1200 Subject: [PATCH 13/61] fix unreserve_all_named (#9042) --- frame/balances/src/tests.rs | 73 +++++++++++++++++++ .../src/traits/tokens/currency/reservable.rs | 2 +- 2 files changed, 74 insertions(+), 1 deletion(-) diff --git a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs index 9589fb25805b4..43d3c2fc6009e 100644 --- a/frame/balances/src/tests.rs +++ b/frame/balances/src/tests.rs @@ -1082,5 +1082,78 @@ macro_rules! 
decl_tests { assert_eq!(Balances::free_balance(&1), 52); }); } + + #[test] + fn ensure_reserved_named_should_work() { + <$ext_builder>::default().build().execute_with(|| { + let _ = Balances::deposit_creating(&1, 111); + + let id = [1u8; 8]; + + assert_ok!(Balances::ensure_reserved_named(&id, &1, 15)); + assert_eq!(Balances::reserved_balance_named(&id, &1), 15); + + assert_ok!(Balances::ensure_reserved_named(&id, &1, 10)); + assert_eq!(Balances::reserved_balance_named(&id, &1), 10); + + assert_ok!(Balances::ensure_reserved_named(&id, &1, 20)); + assert_eq!(Balances::reserved_balance_named(&id, &1), 20); + }); + } + + #[test] + fn unreserve_all_named_should_work() { + <$ext_builder>::default().build().execute_with(|| { + let _ = Balances::deposit_creating(&1, 111); + + let id = [1u8; 8]; + + assert_ok!(Balances::reserve_named(&id, &1, 15)); + + assert_eq!(Balances::unreserve_all_named(&id, &1), 15); + assert_eq!(Balances::reserved_balance_named(&id, &1), 0); + assert_eq!(Balances::free_balance(&1), 111); + + assert_eq!(Balances::unreserve_all_named(&id, &1), 0); + }); + } + + #[test] + fn slash_all_reserved_named_should_work() { + <$ext_builder>::default().build().execute_with(|| { + let _ = Balances::deposit_creating(&1, 111); + + let id = [1u8; 8]; + + assert_ok!(Balances::reserve_named(&id, &1, 15)); + + assert_eq!(Balances::slash_all_reserved_named(&id, &1).peek(), 15); + assert_eq!(Balances::reserved_balance_named(&id, &1), 0); + assert_eq!(Balances::free_balance(&1), 96); + + assert_eq!(Balances::slash_all_reserved_named(&id, &1).peek(), 0); + }); + } + + #[test] + fn repatriate_all_reserved_named_should_work() { + <$ext_builder>::default().build().execute_with(|| { + let _ = Balances::deposit_creating(&1, 111); + let _ = Balances::deposit_creating(&2, 10); + let _ = Balances::deposit_creating(&3, 10); + + let id = [1u8; 8]; + + assert_ok!(Balances::reserve_named(&id, &1, 15)); + + assert_ok!(Balances::repatriate_all_reserved_named(&id, &1, &2, 
Status::Reserved)); + assert_eq!(Balances::reserved_balance_named(&id, &1), 0); + assert_eq!(Balances::reserved_balance_named(&id, &2), 15); + + assert_ok!(Balances::repatriate_all_reserved_named(&id, &2, &3, Status::Free)); + assert_eq!(Balances::reserved_balance_named(&id, &2), 0); + assert_eq!(Balances::free_balance(&3), 25); + }); + } } } diff --git a/frame/support/src/traits/tokens/currency/reservable.rs b/frame/support/src/traits/tokens/currency/reservable.rs index 17dee7a8ae65a..69017357cfa84 100644 --- a/frame/support/src/traits/tokens/currency/reservable.rs +++ b/frame/support/src/traits/tokens/currency/reservable.rs @@ -166,7 +166,7 @@ pub trait NamedReservableCurrency: ReservableCurrency { /// Is a no-op if the value to be unreserved is zero. fn unreserve_all_named(id: &Self::ReserveIdentifier, who: &AccountId) -> Self::Balance { let value = Self::reserved_balance_named(id, who); - Self::slash_reserved_named(id, who, value); + Self::unreserve_named(id, who, value); value } From 0af6df59aab21d5a23907faf236be1ff235b7581 Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Tue, 8 Jun 2021 12:16:56 +0100 Subject: [PATCH 14/61] Delete legacy runtime metadata macros (#9043) --- frame/support/src/lib.rs | 4 +- frame/support/src/metadata.rs | 660 ---------------------------------- 2 files changed, 2 insertions(+), 662 deletions(-) delete mode 100644 frame/support/src/metadata.rs diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 57ab1d6febde7..4e830c26691e8 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -46,6 +46,8 @@ pub use sp_io::{storage::root as storage_root, self}; pub use sp_runtime::RuntimeDebug; #[doc(hidden)] pub use log; +#[doc(hidden)] +pub use frame_metadata as metadata; #[macro_use] mod origin; @@ -56,8 +58,6 @@ mod hash; #[macro_use] pub mod event; #[macro_use] -pub mod metadata; -#[macro_use] pub mod genesis_config; #[macro_use] pub mod inherent; diff --git a/frame/support/src/metadata.rs 
b/frame/support/src/metadata.rs deleted file mode 100644 index d0c59a0dfdc1d..0000000000000 --- a/frame/support/src/metadata.rs +++ /dev/null @@ -1,660 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub use frame_metadata::{ - DecodeDifferent, FnEncode, RuntimeMetadata, ModuleMetadata, RuntimeMetadataLastVersion, - DefaultByteGetter, RuntimeMetadataPrefixed, StorageEntryMetadata, StorageMetadata, - StorageEntryType, StorageEntryModifier, DefaultByte, StorageHasher, ModuleErrorMetadata, - ExtrinsicMetadata, -}; - -/// Implements the metadata support for the given runtime and all its modules. -/// -/// Example: -/// ``` -///# mod module0 { -///# pub trait Config: 'static { -///# type Origin; -///# type BlockNumber; -///# type PalletInfo: frame_support::traits::PalletInfo; -///# type DbWeight: frame_support::traits::Get; -///# } -///# frame_support::decl_module! { -///# pub struct Module for enum Call where origin: T::Origin, system=self {} -///# } -///# -///# frame_support::decl_storage! 
{ -///# trait Store for Module as TestStorage {} -///# } -///# } -///# use module0 as module1; -///# use module0 as module2; -///# impl frame_support::traits::PalletInfo for Runtime { -///# fn index() -> Option { unimplemented!() } -///# fn name() -> Option<&'static str> { unimplemented!() } -///# } -///# impl module0::Config for Runtime { -///# type Origin = u32; -///# type BlockNumber = u32; -///# type PalletInfo = Self; -///# type DbWeight = (); -///# } -///# -///# type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic<(), (), (), ()>; -/// -/// struct Runtime; -/// frame_support::impl_runtime_metadata! { -/// for Runtime with pallets where Extrinsic = UncheckedExtrinsic -/// module0::Module as Module0 { index 0 } with, -/// module1::Module as Module1 { index 1 } with, -/// module2::Module as Module2 { index 2 } with Storage, -/// }; -/// ``` -/// -/// In this example, just `MODULE3` implements the `Storage` trait. -#[macro_export] -macro_rules! impl_runtime_metadata { - ( - for $runtime:ident with pallets where Extrinsic = $ext:ident - $( $rest:tt )* - ) => { - impl $runtime { - pub fn metadata() -> $crate::metadata::RuntimeMetadataPrefixed { - $crate::metadata::RuntimeMetadataLastVersion { - modules: $crate::__runtime_modules_to_metadata!($runtime;; $( $rest )*), - extrinsic: $crate::metadata::ExtrinsicMetadata { - version: <$ext as $crate::sp_runtime::traits::ExtrinsicMetadata>::VERSION, - signed_extensions: < - < - $ext as $crate::sp_runtime::traits::ExtrinsicMetadata - >::SignedExtensions as $crate::sp_runtime::traits::SignedExtension - >::identifier() - .into_iter() - .map($crate::metadata::DecodeDifferent::Encode) - .collect(), - }, - }.into() - } - } - } -} - -#[macro_export] -#[doc(hidden)] -macro_rules! __runtime_modules_to_metadata { - ( - $runtime: ident; - $( $metadata:expr ),*; - $mod:ident::$module:ident $( < $instance:ident > )? 
as $name:ident - { index $index:tt } - $(with)+ $($kw:ident)* - , - $( $rest:tt )* - ) => { - $crate::__runtime_modules_to_metadata!( - $runtime; - $( $metadata, )* $crate::metadata::ModuleMetadata { - name: $crate::metadata::DecodeDifferent::Encode(stringify!($name)), - index: $index, - storage: $crate::__runtime_modules_to_metadata_calls_storage!( - $mod, $module $( <$instance> )?, $runtime, $(with $kw)* - ), - calls: $crate::__runtime_modules_to_metadata_calls_call!( - $mod, $module $( <$instance> )?, $runtime, $(with $kw)* - ), - event: $crate::__runtime_modules_to_metadata_calls_event!( - $mod, $module $( <$instance> )?, $runtime, $(with $kw)* - ), - constants: $crate::metadata::DecodeDifferent::Encode( - $crate::metadata::FnEncode( - $mod::$module::<$runtime $(, $mod::$instance )?>::module_constants_metadata - ) - ), - errors: $crate::metadata::DecodeDifferent::Encode( - $crate::metadata::FnEncode( - <$mod::$module::<$runtime $(, $mod::$instance )?> as $crate::metadata::ModuleErrorMetadata>::metadata - ) - ) - }; - $( $rest )* - ) - }; - ( - $runtime:ident; - $( $metadata:expr ),*; - ) => { - $crate::metadata::DecodeDifferent::Encode(&[ $( $metadata ),* ]) - }; -} - -#[macro_export] -#[doc(hidden)] -macro_rules! __runtime_modules_to_metadata_calls_call { - ( - $mod: ident, - $module: ident $( <$instance:ident> )?, - $runtime: ident, - with Call - $(with $kws:ident)* - ) => { - Some($crate::metadata::DecodeDifferent::Encode( - $crate::metadata::FnEncode( - $mod::$module::<$runtime $(, $mod::$instance )?>::call_functions - ) - )) - }; - ( - $mod: ident, - $module: ident $( <$instance:ident> )?, - $runtime: ident, - with $_:ident - $(with $kws:ident)* - ) => { - $crate::__runtime_modules_to_metadata_calls_call! { - $mod, $module $( <$instance> )?, $runtime, $(with $kws)* - }; - }; - ( - $mod: ident, - $module: ident $( <$instance:ident> )?, - $runtime: ident, - ) => { - None - }; -} - - -#[macro_export] -#[doc(hidden)] -macro_rules! 
__runtime_modules_to_metadata_calls_event { - ( - $mod: ident, - $module: ident $( <$instance:ident> )?, - $runtime: ident, - with Event - $(with $kws:ident)* - ) => { - Some($crate::metadata::DecodeDifferent::Encode( - $crate::metadata::FnEncode( - $crate::paste::expr!{ - $runtime:: [< __module_events_ $mod $(_ $instance)?>] - } - ) - )) - }; - ( - $mod: ident, - $module: ident $( <$instance:ident> )?, - $runtime: ident, - with $_:ident - $(with $kws:ident)* - ) => { - $crate::__runtime_modules_to_metadata_calls_event!( $mod, $module $( <$instance> )?, $runtime, $(with $kws)* ); - }; - ( - $mod: ident, - $module: ident $( <$instance:ident> )?, - $runtime: ident, - ) => { - None - }; -} - -#[macro_export] -#[doc(hidden)] -macro_rules! __runtime_modules_to_metadata_calls_storage { - ( - $mod: ident, - $module: ident $( <$instance:ident> )?, - $runtime: ident, - with Storage - $(with $kws:ident)* - ) => { - Some($crate::metadata::DecodeDifferent::Encode( - $crate::metadata::FnEncode( - $mod::$module::<$runtime $(, $mod::$instance )?>::storage_metadata - ) - )) - }; - ( - $mod: ident, - $module: ident $( <$instance:ident> )?, - $runtime: ident, - with $_:ident - $(with $kws:ident)* - ) => { - $crate::__runtime_modules_to_metadata_calls_storage! { - $mod, $module $( <$instance> )?, $runtime, $(with $kws)* - }; - }; - ( - $mod: ident, - $module: ident $( <$instance:ident> )?, - $runtime: ident, - ) => { - None - }; -} - - -#[cfg(test)] -// Do not complain about unused `dispatch` and `dispatch_aux`. 
-#[allow(dead_code)] -mod tests { - use super::*; - use frame_metadata::{ - EventMetadata, StorageEntryModifier, StorageEntryType, FunctionMetadata, StorageEntryMetadata, - ModuleMetadata, RuntimeMetadataPrefixed, DefaultByte, ModuleConstantMetadata, DefaultByteGetter, - ErrorMetadata, ExtrinsicMetadata, - }; - use codec::{Encode, Decode}; - use crate::traits::Get; - use sp_runtime::transaction_validity::TransactionValidityError; - - #[derive(Clone, Eq, Debug, PartialEq, Encode, Decode)] - struct TestExtension; - impl sp_runtime::traits::SignedExtension for TestExtension { - type AccountId = u32; - type Call = (); - type AdditionalSigned = u32; - type Pre = (); - const IDENTIFIER: &'static str = "testextension"; - fn additional_signed(&self) -> Result { - Ok(1) - } - } - - #[derive(Clone, Eq, Debug, PartialEq, Encode, Decode)] - struct TestExtension2; - impl sp_runtime::traits::SignedExtension for TestExtension2 { - type AccountId = u32; - type Call = (); - type AdditionalSigned = u32; - type Pre = (); - const IDENTIFIER: &'static str = "testextension2"; - fn additional_signed(&self) -> Result { - Ok(1) - } - } - - struct TestExtrinsic; - - impl sp_runtime::traits::ExtrinsicMetadata for TestExtrinsic { - const VERSION: u8 = 1; - type SignedExtensions = (TestExtension, TestExtension2); - } - - mod system { - use super::*; - - pub trait Config: 'static { - type BaseCallFilter; - const ASSOCIATED_CONST: u64 = 500; - type Origin: Into, Self::Origin>> - + From>; - type AccountId: From + Encode; - type BlockNumber: From + Encode; - type SomeValue: Get; - type PalletInfo: crate::traits::PalletInfo; - type DbWeight: crate::traits::Get; - type Call; - } - - decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self { - /// Hi, I am a comment. 
- const BlockNumber: T::BlockNumber = 100.into(); - const GetType: T::AccountId = T::SomeValue::get().into(); - const ASSOCIATED_CONST: u64 = T::ASSOCIATED_CONST.into(); - } - } - - decl_event!( - pub enum Event { - SystemEvent, - } - ); - - #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] - pub enum RawOrigin { - Root, - Signed(AccountId), - None, - } - - impl From> for RawOrigin { - fn from(s: Option) -> RawOrigin { - match s { - Some(who) => RawOrigin::Signed(who), - None => RawOrigin::None, - } - } - } - - pub type Origin = RawOrigin<::AccountId>; - } - - mod event_module { - use crate::dispatch::DispatchResult; - use super::system; - - pub trait Config: system::Config { - type Balance; - } - - decl_event!( - pub enum Event where ::Balance - { - /// Hi, I am a comment. - TestEvent(Balance), - } - ); - - decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=system { - type Error = Error; - - #[weight = 0] - fn aux_0(_origin) -> DispatchResult { unreachable!() } - } - } - - crate::decl_error! { - pub enum Error for Module { - /// Some user input error - UserInputError, - /// Something bad happened - /// this could be due to many reasons - BadThingHappened, - } - } - } - - mod event_module2 { - use super::system; - - pub trait Config: system::Config { - type Balance; - } - - decl_event!( - pub enum Event where ::Balance - { - TestEvent(Balance), - } - ); - - decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=system {} - } - - crate::decl_storage! { - trait Store for Module as TestStorage { - StorageMethod : Option; - } - add_extra_genesis { - build(|_| {}); - } - } - } - - type EventModule = event_module::Module; - type EventModule2 = event_module2::Module; - - #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)] - pub struct TestRuntime; - - impl crate::traits::PalletInfo for TestRuntime { - fn index() -> Option { - let type_id = sp_std::any::TypeId::of::

(); - if type_id == sp_std::any::TypeId::of::>() { - return Some(0) - } - if type_id == sp_std::any::TypeId::of::() { - return Some(1) - } - if type_id == sp_std::any::TypeId::of::() { - return Some(2) - } - - None - } - fn name() -> Option<&'static str> { - let type_id = sp_std::any::TypeId::of::

(); - if type_id == sp_std::any::TypeId::of::>() { - return Some("System") - } - if type_id == sp_std::any::TypeId::of::() { - return Some("EventModule") - } - if type_id == sp_std::any::TypeId::of::() { - return Some("EventModule2") - } - - None - } - } - - impl_outer_event! { - pub enum TestEvent for TestRuntime { - system, - event_module, - event_module2, - } - } - - impl_outer_origin! { - pub enum Origin for TestRuntime where system = system {} - } - - impl_outer_dispatch! { - pub enum Call for TestRuntime where origin: Origin { - event_module::EventModule, - event_module2::EventModule2, - } - } - - impl event_module::Config for TestRuntime { - type Balance = u32; - } - - impl event_module2::Config for TestRuntime { - type Balance = u32; - } - - crate::parameter_types! { - pub const SystemValue: u32 = 600; - } - - impl system::Config for TestRuntime { - type BaseCallFilter = (); - type Origin = Origin; - type AccountId = u32; - type BlockNumber = u32; - type SomeValue = SystemValue; - type PalletInfo = Self; - type DbWeight = (); - type Call = Call; - } - - impl_runtime_metadata!( - for TestRuntime with pallets where Extrinsic = TestExtrinsic - system::Pallet as System { index 0 } with Event, - event_module::Module as Module { index 1 } with Event Call, - event_module2::Module as Module2 { index 2 } with Event Storage Call, - ); - - struct ConstantBlockNumberByteGetter; - impl DefaultByte for ConstantBlockNumberByteGetter { - fn default_byte(&self) -> Vec { - 100u32.encode() - } - } - - struct ConstantGetTypeByteGetter; - impl DefaultByte for ConstantGetTypeByteGetter { - fn default_byte(&self) -> Vec { - SystemValue::get().encode() - } - } - - struct ConstantAssociatedConstByteGetter; - impl DefaultByte for ConstantAssociatedConstByteGetter { - fn default_byte(&self) -> Vec { - ::ASSOCIATED_CONST.encode() - } - } - - #[test] - fn runtime_metadata() { - let expected_metadata: RuntimeMetadataLastVersion = RuntimeMetadataLastVersion { - modules: 
DecodeDifferent::Encode(&[ - ModuleMetadata { - name: DecodeDifferent::Encode("System"), - index: 0, - storage: None, - calls: None, - event: Some(DecodeDifferent::Encode( - FnEncode(||&[ - EventMetadata { - name: DecodeDifferent::Encode("SystemEvent"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]) - } - ]) - )), - constants: DecodeDifferent::Encode( - FnEncode(|| &[ - ModuleConstantMetadata { - name: DecodeDifferent::Encode("BlockNumber"), - ty: DecodeDifferent::Encode("T::BlockNumber"), - value: DecodeDifferent::Encode( - DefaultByteGetter(&ConstantBlockNumberByteGetter) - ), - documentation: DecodeDifferent::Encode(&[" Hi, I am a comment."]), - }, - ModuleConstantMetadata { - name: DecodeDifferent::Encode("GetType"), - ty: DecodeDifferent::Encode("T::AccountId"), - value: DecodeDifferent::Encode( - DefaultByteGetter(&ConstantGetTypeByteGetter) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - ModuleConstantMetadata { - name: DecodeDifferent::Encode("ASSOCIATED_CONST"), - ty: DecodeDifferent::Encode("u64"), - value: DecodeDifferent::Encode( - DefaultByteGetter(&ConstantAssociatedConstByteGetter) - ), - documentation: DecodeDifferent::Encode(&[]), - } - ]) - ), - errors: DecodeDifferent::Encode(FnEncode(|| &[])), - }, - ModuleMetadata { - name: DecodeDifferent::Encode("Module"), - index: 1, - storage: None, - calls: Some( - DecodeDifferent::Encode(FnEncode(|| &[ - FunctionMetadata { - name: DecodeDifferent::Encode("aux_0"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - } - ]))), - event: Some(DecodeDifferent::Encode( - FnEncode(||&[ - EventMetadata { - name: DecodeDifferent::Encode("TestEvent"), - arguments: DecodeDifferent::Encode(&["Balance"]), - documentation: DecodeDifferent::Encode(&[" Hi, I am a comment."]) - } - ]) - )), - constants: DecodeDifferent::Encode(FnEncode(|| &[])), - errors: DecodeDifferent::Encode(FnEncode(|| &[ - ErrorMetadata { - name: 
DecodeDifferent::Encode("UserInputError"), - documentation: DecodeDifferent::Encode(&[" Some user input error"]), - }, - ErrorMetadata { - name: DecodeDifferent::Encode("BadThingHappened"), - documentation: DecodeDifferent::Encode(&[ - " Something bad happened", - " this could be due to many reasons", - ]), - }, - ])), - }, - ModuleMetadata { - name: DecodeDifferent::Encode("Module2"), - index: 2, - storage: Some(DecodeDifferent::Encode( - FnEncode(|| StorageMetadata { - prefix: DecodeDifferent::Encode("TestStorage"), - entries: DecodeDifferent::Encode( - &[ - StorageEntryMetadata { - name: DecodeDifferent::Encode("StorageMethod"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter( - &event_module2::__GetByteStructStorageMethod( - std::marker::PhantomData:: - ) - ) - ), - documentation: DecodeDifferent::Encode(&[]), - } - ] - ) - }), - )), - calls: Some(DecodeDifferent::Encode(FnEncode(|| &[]))), - event: Some(DecodeDifferent::Encode( - FnEncode(||&[ - EventMetadata { - name: DecodeDifferent::Encode("TestEvent"), - arguments: DecodeDifferent::Encode(&["Balance"]), - documentation: DecodeDifferent::Encode(&[]) - } - ]) - )), - constants: DecodeDifferent::Encode(FnEncode(|| &[])), - errors: DecodeDifferent::Encode(FnEncode(|| &[])), - }, - ]), - extrinsic: ExtrinsicMetadata { - version: 1, - signed_extensions: vec![ - DecodeDifferent::Encode("testextension"), - DecodeDifferent::Encode("testextension2"), - ], - } - }; - - let metadata_encoded = TestRuntime::metadata().encode(); - let metadata_decoded = RuntimeMetadataPrefixed::decode(&mut &metadata_encoded[..]); - let expected_metadata: RuntimeMetadataPrefixed = expected_metadata.into(); - - pretty_assertions::assert_eq!(expected_metadata, metadata_decoded.unwrap()); - } -} From 0a2472d8364bc103a0a13c8e2dcb7f6ed3e44342 Mon Sep 17 00:00:00 2001 From: tgmichel Date: Tue, 8 Jun 2021 13:18:57 +0200 Subject: 
[PATCH 15/61] `rpc-http-threads` cli arg (#8890) * Add optional `rpc-http-threads` cli arg * Update `http::ServerBuilder`threads --- client/cli/src/commands/run_cmd.rs | 8 ++++++++ client/cli/src/config.rs | 8 ++++++++ client/rpc-servers/src/lib.rs | 6 +++++- client/service/src/config.rs | 2 ++ client/service/src/lib.rs | 1 + client/service/test/src/lib.rs | 1 + test-utils/test-runner/src/utils.rs | 1 + utils/browser/src/lib.rs | 1 + 8 files changed, 27 insertions(+), 1 deletion(-) diff --git a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index 9ef14cfa02b82..3e5823ef733aa 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -122,6 +122,10 @@ pub struct RunCmd { #[structopt(long = "ws-max-connections", value_name = "COUNT")] pub ws_max_connections: Option, + /// Size of the RPC HTTP server thread pool. + #[structopt(long = "rpc-http-threads", value_name = "COUNT")] + pub rpc_http_threads: Option, + /// Specify browser Origins allowed to access the HTTP & WS RPC servers. /// /// A comma-separated list of origins (protocol://domain or special `null` @@ -376,6 +380,10 @@ impl CliConfiguration for RunCmd { Ok(self.ws_max_connections) } + fn rpc_http_threads(&self) -> Result> { + Ok(self.rpc_http_threads) + } + fn rpc_cors(&self, is_dev: bool) -> Result>> { Ok(self .rpc_cors diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index a21a79afe9fdb..62afc849c09fb 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -358,6 +358,13 @@ pub trait CliConfiguration: Sized { Ok(None) } + /// Get the RPC HTTP thread pool size (`None` for a default 4-thread pool config). + /// + /// By default this is `None`. + fn rpc_http_threads(&self) -> Result> { + Ok(None) + } + /// Get the RPC cors (`None` if disabled) /// /// By default this is `Some(Vec::new())`. 
@@ -526,6 +533,7 @@ pub trait CliConfiguration: Sized { rpc_ipc: self.rpc_ipc()?, rpc_methods: self.rpc_methods()?, rpc_ws_max_connections: self.rpc_ws_max_connections()?, + rpc_http_threads: self.rpc_http_threads()?, rpc_cors: self.rpc_cors(is_dev)?, prometheus_config: self.prometheus_config(DCV::prometheus_listen_port())?, telemetry_endpoints, diff --git a/client/rpc-servers/src/lib.rs b/client/rpc-servers/src/lib.rs index be6abea67b055..cb2704efc82ab 100644 --- a/client/rpc-servers/src/lib.rs +++ b/client/rpc-servers/src/lib.rs @@ -33,6 +33,9 @@ pub const MAX_PAYLOAD: usize = 15 * 1024 * 1024; /// Default maximum number of connections for WS RPC servers. const WS_MAX_CONNECTIONS: usize = 100; +/// Default thread pool size for RPC HTTP servers. +const HTTP_THREADS: usize = 4; + /// The RPC IoHandler containing all requested APIs. pub type RpcHandler = pubsub::PubSubHandler; @@ -79,11 +82,12 @@ mod inner { /// **Note**: Only available if `not(target_os = "unknown")`. pub fn start_http( addr: &std::net::SocketAddr, + thread_pool_size: Option, cors: Option<&Vec>, io: RpcHandler, ) -> io::Result { http::ServerBuilder::new(io) - .threads(4) + .threads(thread_pool_size.unwrap_or(HTTP_THREADS)) .health_api(("/health", "system_health")) .allowed_hosts(hosts_filtering(cors.is_some())) .rest_api(if cors.is_some() { diff --git a/client/service/src/config.rs b/client/service/src/config.rs index 5d8ee89225cb8..f2c5f2c6ed407 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -89,6 +89,8 @@ pub struct Configuration { pub rpc_ipc: Option, /// Maximum number of connections for WebSockets RPC server. `None` if default. pub rpc_ws_max_connections: Option, + /// Size of the RPC HTTP server thread pool. `None` if default. + pub rpc_http_threads: Option, /// CORS settings for HTTP & WS servers. `None` if all origins are allowed. pub rpc_cors: Option>, /// RPC methods to expose (by default only a safe subset or all of them). 
diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index ae2cfbc8b8941..51ee0965ebcf4 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -381,6 +381,7 @@ fn start_rpc_servers< config.rpc_http, |address| sc_rpc_server::start_http( address, + config.rpc_http_threads, config.rpc_cors.as_ref(), gen_handler( deny_unsafe(&address, &config.rpc_methods), diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index a80c53a8c21c5..3999b852ac74c 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -262,6 +262,7 @@ fn node_config Date: Tue, 8 Jun 2021 10:02:57 -0400 Subject: [PATCH 16/61] Emit `Bonded` event when rebonding (#9040) * Emit `Bonded` event when rebonding * fix borrow checker * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_staking --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/staking/src/weights.rs --template=./.maintain/frame-weight-template.hbs Co-authored-by: Parity Bot --- frame/staking/src/lib.rs | 1 + frame/staking/src/weights.rs | 218 +++++++++++++++++------------------ 2 files changed, 110 insertions(+), 109 deletions(-) diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index c8011faef1513..888601e307f35 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -1820,6 +1820,7 @@ decl_module! { // last check: the new active amount of ledger must be more than ED. 
ensure!(ledger.active >= T::Currency::minimum_balance(), Error::::InsufficientValue); + Self::deposit_event(RawEvent::Bonded(ledger.stash.clone(), value)); Self::update_ledger(&controller, &ledger); Ok(Some( 35 * WEIGHT_PER_MICROS diff --git a/frame/staking/src/weights.rs b/frame/staking/src/weights.rs index d3274cad8050e..5960d6612566e 100644 --- a/frame/staking/src/weights.rs +++ b/frame/staking/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_staking //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-03-25, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-06-07, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -76,155 +76,155 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn bond() -> Weight { - (79_895_000 as Weight) + (91_959_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn bond_extra() -> Weight { - (60_561_000 as Weight) + (69_291_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn unbond() -> Weight { - (54_996_000 as Weight) + (63_513_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_update(s: u32, ) -> Weight { - (56_056_000 as Weight) + (64_747_000 as Weight) // Standard Error: 0 - .saturating_add((67_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((77_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_kill(s: u32, ) -> Weight { - (90_267_000 as Weight) + (100_375_000 as Weight) // 
Standard Error: 1_000 - .saturating_add((2_787_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((3_067_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn validate() -> Weight { - (16_345_000 as Weight) + (17_849_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn kick(k: u32, ) -> Weight { - (27_080_000 as Weight) - // Standard Error: 14_000 - .saturating_add((18_739_000 as Weight).saturating_mul(k as Weight)) + (27_939_000 as Weight) + // Standard Error: 16_000 + .saturating_add((21_431_000 as Weight).saturating_mul(k as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(k as Weight))) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } fn nominate(n: u32, ) -> Weight { - (29_101_000 as Weight) - // Standard Error: 23_000 - .saturating_add((5_670_000 as Weight).saturating_mul(n as Weight)) + (32_791_000 as Weight) + // Standard Error: 33_000 + .saturating_add((7_006_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn chill() -> Weight { - (15_771_000 as Weight) + (17_014_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn set_payee() -> Weight { - (13_329_000 as Weight) + (14_816_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_controller() -> Weight { - (29_807_000 as Weight) + (33_600_000 as Weight) 
.saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn set_validator_count() -> Weight { - (2_323_000 as Weight) + (2_706_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_no_eras() -> Weight { - (2_528_000 as Weight) + (2_973_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_new_era() -> Weight { - (2_529_000 as Weight) + (2_949_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_new_era_always() -> Weight { - (2_527_000 as Weight) + (3_011_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_invulnerables(v: u32, ) -> Weight { - (2_661_000 as Weight) + (3_078_000 as Weight) // Standard Error: 0 - .saturating_add((35_000 as Weight).saturating_mul(v as Weight)) + .saturating_add((5_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_unstake(s: u32, ) -> Weight { - (64_650_000 as Weight) + (69_220_000 as Weight) // Standard Error: 1_000 - .saturating_add((2_755_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((3_070_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn cancel_deferred_slash(s: u32, ) -> Weight { - (5_904_642_000 as Weight) - // Standard Error: 393_000 - .saturating_add((34_810_000 as Weight).saturating_mul(s as Weight)) + (3_460_399_000 as Weight) + // Standard Error: 222_000 + .saturating_add((19_782_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn payout_stakers_dead_controller(n: u32, ) -> Weight { - (131_368_000 as Weight) - // Standard Error: 17_000 - .saturating_add((52_611_000 as 
Weight).saturating_mul(n as Weight)) + (120_436_000 as Weight) + // Standard Error: 27_000 + .saturating_add((63_092_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(10 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) } fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (165_079_000 as Weight) - // Standard Error: 27_000 - .saturating_add((66_740_000 as Weight).saturating_mul(n as Weight)) + (181_424_000 as Weight) + // Standard Error: 51_000 + .saturating_add((78_631_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(11 as Weight)) .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(3 as Weight)) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) } fn rebond(l: u32, ) -> Weight { - (37_039_000 as Weight) + (59_349_000 as Weight) // Standard Error: 2_000 - .saturating_add((93_000 as Weight).saturating_mul(l as Weight)) + .saturating_add((64_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn set_history_depth(e: u32, ) -> Weight { (0 as Weight) - // Standard Error: 71_000 - .saturating_add((34_403_000 as Weight).saturating_mul(e as Weight)) + // Standard Error: 97_000 + .saturating_add((44_609_000 as Weight).saturating_mul(e as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) .saturating_add(T::DbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) } fn reap_stash(s: u32, ) -> Weight { - (67_561_000 as Weight) - // Standard Error: 0 - .saturating_add((2_766_000 as Weight).saturating_mul(s as Weight)) + 
(72_356_000 as Weight) + // Standard Error: 2_000 + .saturating_add((3_066_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn new_era(v: u32, n: u32, ) -> Weight { (0 as Weight) - // Standard Error: 1_016_000 - .saturating_add((389_979_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 51_000 - .saturating_add((63_208_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 1_462_000 + .saturating_add((393_007_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 73_000 + .saturating_add((72_014_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(10 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) @@ -233,21 +233,21 @@ impl WeightInfo for SubstrateWeight { } fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { (0 as Weight) - // Standard Error: 95_000 - .saturating_add((26_419_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 95_000 - .saturating_add((29_033_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 1_305_000 - .saturating_add((23_680_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 235_000 + .saturating_add((35_212_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 235_000 + .saturating_add((38_391_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 3_200_000 + .saturating_add((31_130_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().reads((1 as 
Weight).saturating_mul(s as Weight))) } fn get_npos_targets(v: u32, ) -> Weight { - (0 as Weight) - // Standard Error: 32_000 - .saturating_add((11_317_000 as Weight).saturating_mul(v as Weight)) + (52_314_000 as Weight) + // Standard Error: 71_000 + .saturating_add((15_195_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) } @@ -256,155 +256,155 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { fn bond() -> Weight { - (79_895_000 as Weight) + (91_959_000 as Weight) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn bond_extra() -> Weight { - (60_561_000 as Weight) + (69_291_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn unbond() -> Weight { - (54_996_000 as Weight) + (63_513_000 as Weight) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_update(s: u32, ) -> Weight { - (56_056_000 as Weight) + (64_747_000 as Weight) // Standard Error: 0 - .saturating_add((67_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((77_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_kill(s: u32, ) -> Weight { - (90_267_000 as Weight) + (100_375_000 as Weight) // Standard Error: 1_000 - .saturating_add((2_787_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((3_067_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(8 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) 
} fn validate() -> Weight { - (16_345_000 as Weight) + (17_849_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn kick(k: u32, ) -> Weight { - (27_080_000 as Weight) - // Standard Error: 14_000 - .saturating_add((18_739_000 as Weight).saturating_mul(k as Weight)) + (27_939_000 as Weight) + // Standard Error: 16_000 + .saturating_add((21_431_000 as Weight).saturating_mul(k as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(k as Weight))) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } fn nominate(n: u32, ) -> Weight { - (29_101_000 as Weight) - // Standard Error: 23_000 - .saturating_add((5_670_000 as Weight).saturating_mul(n as Weight)) + (32_791_000 as Weight) + // Standard Error: 33_000 + .saturating_add((7_006_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn chill() -> Weight { - (15_771_000 as Weight) + (17_014_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn set_payee() -> Weight { - (13_329_000 as Weight) + (14_816_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn set_controller() -> Weight { - (29_807_000 as Weight) + (33_600_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn set_validator_count() -> Weight { - (2_323_000 as Weight) + (2_706_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_no_eras() -> Weight { - (2_528_000 as Weight) + (2_973_000 as Weight) 
.saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_new_era() -> Weight { - (2_529_000 as Weight) + (2_949_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_new_era_always() -> Weight { - (2_527_000 as Weight) + (3_011_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn set_invulnerables(v: u32, ) -> Weight { - (2_661_000 as Weight) + (3_078_000 as Weight) // Standard Error: 0 - .saturating_add((35_000 as Weight).saturating_mul(v as Weight)) + .saturating_add((5_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_unstake(s: u32, ) -> Weight { - (64_650_000 as Weight) + (69_220_000 as Weight) // Standard Error: 1_000 - .saturating_add((2_755_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((3_070_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(8 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn cancel_deferred_slash(s: u32, ) -> Weight { - (5_904_642_000 as Weight) - // Standard Error: 393_000 - .saturating_add((34_810_000 as Weight).saturating_mul(s as Weight)) + (3_460_399_000 as Weight) + // Standard Error: 222_000 + .saturating_add((19_782_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn payout_stakers_dead_controller(n: u32, ) -> Weight { - (131_368_000 as Weight) - // Standard Error: 17_000 - .saturating_add((52_611_000 as Weight).saturating_mul(n as Weight)) + (120_436_000 as Weight) + // Standard Error: 27_000 + .saturating_add((63_092_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(10 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) 
.saturating_add(RocksDbWeight::get().writes(2 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) } fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (165_079_000 as Weight) - // Standard Error: 27_000 - .saturating_add((66_740_000 as Weight).saturating_mul(n as Weight)) + (181_424_000 as Weight) + // Standard Error: 51_000 + .saturating_add((78_631_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(11 as Weight)) .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) } fn rebond(l: u32, ) -> Weight { - (37_039_000 as Weight) + (59_349_000 as Weight) // Standard Error: 2_000 - .saturating_add((93_000 as Weight).saturating_mul(l as Weight)) + .saturating_add((64_000 as Weight).saturating_mul(l as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn set_history_depth(e: u32, ) -> Weight { (0 as Weight) - // Standard Error: 71_000 - .saturating_add((34_403_000 as Weight).saturating_mul(e as Weight)) + // Standard Error: 97_000 + .saturating_add((44_609_000 as Weight).saturating_mul(e as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) .saturating_add(RocksDbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) } fn reap_stash(s: u32, ) -> Weight { - (67_561_000 as Weight) - // Standard Error: 0 - .saturating_add((2_766_000 as Weight).saturating_mul(s as Weight)) + (72_356_000 as Weight) + // Standard Error: 2_000 + .saturating_add((3_066_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(8 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as 
Weight).saturating_mul(s as Weight))) } fn new_era(v: u32, n: u32, ) -> Weight { (0 as Weight) - // Standard Error: 1_016_000 - .saturating_add((389_979_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 51_000 - .saturating_add((63_208_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 1_462_000 + .saturating_add((393_007_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 73_000 + .saturating_add((72_014_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(10 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) @@ -413,21 +413,21 @@ impl WeightInfo for () { } fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { (0 as Weight) - // Standard Error: 95_000 - .saturating_add((26_419_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 95_000 - .saturating_add((29_033_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 1_305_000 - .saturating_add((23_680_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 235_000 + .saturating_add((35_212_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 235_000 + .saturating_add((38_391_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 3_200_000 + .saturating_add((31_130_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) } fn get_npos_targets(v: u32, ) -> Weight { - (0 as Weight) - // Standard Error: 32_000 - .saturating_add((11_317_000 as Weight).saturating_mul(v as Weight)) + (52_314_000 as Weight) + // Standard Error: 71_000 + 
.saturating_add((15_195_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) } From 5f5fbea7a787d8b5db6133530e1dbb0606c08cf1 Mon Sep 17 00:00:00 2001 From: Niklas Adolfsson Date: Tue, 8 Jun 2021 18:17:25 +0200 Subject: [PATCH 17/61] deps(jsonrpsee): update to 0.2.0 to avoid alpha (#9036) The motivation is a couple of bug fixes and not to pin to certain alpha versions. --- Cargo.lock | 424 ++++++++++++-------- utils/frame/remote-externalities/Cargo.toml | 6 +- 2 files changed, 258 insertions(+), 172 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8a25ba6c7a480..cc8557daad2f4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -349,19 +349,6 @@ version = "4.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e91831deabf0d6d7ec49552e489aed63b7456a7a3c46cff62adad428110b0af0" -[[package]] -name = "async-tls" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f23d769dbf1838d5df5156e7b1ad404f4c463d1ac2c6aeb6cd943630f8a8400" -dependencies = [ - "futures-core", - "futures-io", - "rustls 0.19.0", - "webpki 0.21.4", - "webpki-roots", -] - [[package]] name = "async-trait" version = "0.1.48" @@ -474,6 +461,9 @@ name = "beef" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6736e2428df2ca2848d846c43e88745121a6654696e349ce0054a420815a7409" +dependencies = [ + "serde", +] [[package]] name = "bincode" @@ -938,7 +928,17 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171" dependencies = [ - "core-foundation-sys", + "core-foundation-sys 0.7.0", + "libc", +] + +[[package]] +name = "core-foundation" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0a89e2ae426ea83155dccf10c0fa6b1463ef6d5fcb44cee0b224a408fa640a62" +dependencies = [ + "core-foundation-sys 0.8.2", "libc", ] @@ -948,6 +948,12 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" +[[package]] +name = "core-foundation-sys" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" + [[package]] name = "cpp_demangle" version = "0.3.2" @@ -1604,7 +1610,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e43f2f1833d64e33f15592464d6fdd70f349dda7b1a53088eb83cd94014008c5" dependencies = [ - "futures 0.3.13", + "futures 0.3.15", ] [[package]] @@ -1676,7 +1682,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6447e2f8178843749e8c8003206def83ec124a7859475395777a28b5338647c" dependencies = [ "either", - "futures 0.3.13", + "futures 0.3.15", "futures-timer 3.0.2", "log", "num-traits", @@ -2020,9 +2026,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f55667319111d593ba876406af7c409c0ebb44dc4be6132a783ccf163ea14c1" +checksum = "0e7e43a803dae2fa37c1f6a8fe121e1f7bf9548b4dfc0522a42f34145dadfc27" dependencies = [ "futures-channel", "futures-core", @@ -2035,9 +2041,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c2dd2df839b57db9ab69c2c9d8f3e8c81984781937fe2807dc6dcf3b2ad2939" +checksum = "e682a68b29a882df0545c143dc3646daefe80ba479bcdede94d5a703de2871e2" dependencies = [ "futures-core", "futures-sink", @@ -2045,9 +2051,9 @@ dependencies = [ [[package]] name = 
"futures-core" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15496a72fabf0e62bdc3df11a59a3787429221dd0710ba8ef163d6f7a9112c94" +checksum = "0402f765d8a89a26043b889b26ce3c4679d268fa6bb22cd7c6aad98340e179d1" [[package]] name = "futures-cpupool" @@ -2066,7 +2072,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdcef58a173af8148b182684c9f2d5250875adbcaff7b5794073894f9d8634a9" dependencies = [ "futures 0.1.31", - "futures 0.3.13", + "futures 0.3.15", "lazy_static", "log", "parking_lot 0.9.0", @@ -2077,9 +2083,9 @@ dependencies = [ [[package]] name = "futures-executor" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "891a4b7b96d84d5940084b2a37632dd65deeae662c114ceaa2c879629c9c0ad1" +checksum = "badaa6a909fac9e7236d0620a2f57f7664640c56575b71a7552fbd68deafab79" dependencies = [ "futures-core", "futures-task", @@ -2089,9 +2095,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71c2c65c57704c32f5241c1223167c2c3294fd34ac020c807ddbe6db287ba59" +checksum = "acc499defb3b348f8d8f3f66415835a9131856ff7714bf10dadfc4ec4bdb29a1" [[package]] name = "futures-lite" @@ -2110,10 +2116,11 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea405816a5139fb39af82c2beb921d52143f556038378d6db21183a5c37fbfb7" +checksum = "a4c40298486cdf52cc00cd6d6987892ba502c7656a16a4192a9992b1ccedd121" dependencies = [ + "autocfg", "proc-macro-hack", "proc-macro2", "quote", @@ -2127,21 +2134,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3a1387e07917c711fb4ee4f48ea0adb04a3c9739e53ef85bf43ae1edc2937a8b" dependencies = [ "futures-io", - "rustls 0.19.0", - "webpki 
0.21.4", + "rustls 0.19.1", + "webpki", ] [[package]] name = "futures-sink" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85754d98985841b7d4f5e8e6fbfa4a4ac847916893ec511a2917ccd8525b8bb3" +checksum = "a57bead0ceff0d6dde8f465ecd96c9338121bb7717d3e7b108059531870c4282" [[package]] name = "futures-task" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa189ef211c15ee602667a6fcfe1c1fd9e07d42250d2156382820fba33c9df80" +checksum = "8a16bef9fc1a4dddb5bee51c989e3fbba26569cbb0e31f5b303c184e3dd33dae" [[package]] name = "futures-timer" @@ -2161,10 +2168,11 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1812c7ab8aedf8d6f2701a43e1243acdbcc2b36ab26e2ad421eb99ac963d96d1" +checksum = "feb5c238d27e2bf94ffdfd27b2c29e3df4a68c4193bb6427384259e2bf191967" dependencies = [ + "autocfg", "futures 0.1.31", "futures-channel", "futures-core", @@ -2514,6 +2522,17 @@ dependencies = [ "http 0.2.3", ] +[[package]] +name = "http-body" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60daa14be0e0786db0f03a9e57cb404c9d756eed2b6c62b9ea98ec5743ec75a9" +dependencies = [ + "bytes 1.0.1", + "http 0.2.3", + "pin-project-lite 0.2.6", +] + [[package]] name = "httparse" version = "1.3.5" @@ -2595,6 +2614,28 @@ dependencies = [ "want 0.3.0", ] +[[package]] +name = "hyper" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bf09f61b52cfcf4c00de50df88ae423d6c02354e385a86341133b5338630ad1" +dependencies = [ + "bytes 1.0.1", + "futures-channel", + "futures-core", + "futures-util", + "http 0.2.3", + "http-body 0.4.2", + "httparse", + "httpdate", + "itoa", + "pin-project 1.0.5", + "tokio 1.6.0", + "tower-service", + "tracing", + "want 0.3.0", +] + [[package]] 
name = "hyper-rustls" version = "0.21.0" @@ -2607,10 +2648,10 @@ dependencies = [ "hyper 0.13.10", "log", "rustls 0.18.1", - "rustls-native-certs", + "rustls-native-certs 0.4.0", "tokio 0.2.25", - "tokio-rustls", - "webpki 0.21.4", + "tokio-rustls 0.14.1", + "webpki", ] [[package]] @@ -2663,7 +2704,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6d52908d4ea4ab2bc22474ba149bf1011c8e2c3ebc1ff593ae28ac44f494b6" dependencies = [ "async-io", - "futures 0.3.13", + "futures 0.3.15", "futures-lite", "if-addrs", "ipnet", @@ -2739,7 +2780,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64fa110ec7b8f493f416eed552740d10e7030ad5f63b2308f82c9608ec2df275" dependencies = [ - "futures 0.3.13", + "futures 0.3.15", "futures-timer 2.0.2", ] @@ -2943,9 +2984,9 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.2.0-alpha.6" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5784ee8bb31988fa2c7a755fe31b0e21aa51894a67e5c99b6d4470f0253bf31a" +checksum = "3b4c85cfa6767333f3e5f3b2f2f765dad2727b0033ee270ae07c599bf43ed5ae" dependencies = [ "Inflector", "proc-macro-crate 1.0.0", @@ -2956,40 +2997,44 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.2.0-alpha.6" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab3dabceeeeb865897661d532d47202eaae71cd2c606f53cb69f1fbc0555a51" +checksum = "c0cf7bd4e93b3b56e59131de7f24afbea871faf914e97bcdd942c86927ab0172" dependencies = [ "async-trait", "beef", "futures-channel", "futures-util", + "hyper 0.14.5", "log", "serde", "serde_json", + "soketto 0.5.0", "thiserror", ] [[package]] name = "jsonrpsee-ws-client" -version = "0.2.0-alpha.6" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6fdb4390bd25358c62e8b778652a564a1723ba07dca0feb3da439c2253fe59f" +checksum = 
"6ec51150965544e1a4468f372bdab8545243a1b045d4ab272023aac74c60de32" dependencies = [ - "async-std", - "async-tls", "async-trait", "fnv", - "futures 0.3.13", + "futures 0.3.15", "jsonrpsee-types", "log", "pin-project 1.0.5", + "rustls 0.19.1", + "rustls-native-certs 0.5.0", "serde", "serde_json", - "soketto", + "soketto 0.5.0", "thiserror", + "tokio 0.2.25", + "tokio-rustls 0.15.0", + "tokio-util", "url 2.2.1", - "webpki 0.22.0", ] [[package]] @@ -3073,7 +3118,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb1e98ba343d0b35f9009a8844cd2b87fa3192f7e79033ac05b00aeae0f3b0b5" dependencies = [ - "futures 0.3.13", + "futures 0.3.15", "js-sys", "kvdb", "kvdb-memorydb", @@ -3105,9 +3150,9 @@ checksum = "3576a87f2ba00f6f106fdfcd16db1d698d648a26ad8e0573cad8537c3c362d2a" [[package]] name = "libc" -version = "0.2.90" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba4aede83fc3617411dc6993bc8c70919750c1c257c6ca6a502aed6e0e2394ae" +checksum = "789da6d93f1b866ffe175afc5322a4d76c038605a1c3319bb57b06967ca98a36" [[package]] name = "libloading" @@ -3143,7 +3188,7 @@ checksum = "08053fbef67cd777049ef7a95ebaca2ece370b4ed7712c3fa404d69a88cb741b" dependencies = [ "atomic", "bytes 1.0.1", - "futures 0.3.13", + "futures 0.3.15", "lazy_static", "libp2p-core", "libp2p-deflate", @@ -3185,7 +3230,7 @@ dependencies = [ "ed25519-dalek", "either", "fnv", - "futures 0.3.13", + "futures 0.3.15", "futures-timer 3.0.2", "lazy_static", "libsecp256k1", @@ -3215,7 +3260,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2181a641cd15f9b6ba71b1335800f309012a0a97a29ffaabbbf40e9d3d58f08" dependencies = [ "flate2", - "futures 0.3.13", + "futures 0.3.15", "libp2p-core", ] @@ -3226,7 +3271,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62e63dab8b5ff35e0c101a3e51e843ba782c07bbb1682f5fd827622e0d02b98b" dependencies = [ "async-std-resolver", - 
"futures 0.3.13", + "futures 0.3.15", "libp2p-core", "log", "smallvec 1.6.1", @@ -3241,7 +3286,7 @@ checksum = "48a9b570f6766301d9c4aa00fce3554cad1598e2f466debbc4dde909028417cf" dependencies = [ "cuckoofilter", "fnv", - "futures 0.3.13", + "futures 0.3.15", "libp2p-core", "libp2p-swarm", "log", @@ -3262,7 +3307,7 @@ dependencies = [ "byteorder", "bytes 1.0.1", "fnv", - "futures 0.3.13", + "futures 0.3.15", "hex_fmt", "libp2p-core", "libp2p-swarm", @@ -3283,7 +3328,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f668f00efd9883e8b7bcc582eaf0164615792608f886f6577da18bcbeea0a46" dependencies = [ - "futures 0.3.13", + "futures 0.3.15", "libp2p-core", "libp2p-swarm", "log", @@ -3304,7 +3349,7 @@ dependencies = [ "bytes 1.0.1", "either", "fnv", - "futures 0.3.13", + "futures 0.3.15", "libp2p-core", "libp2p-swarm", "log", @@ -3328,7 +3373,7 @@ dependencies = [ "async-io", "data-encoding", "dns-parser", - "futures 0.3.13", + "futures 0.3.15", "if-watch", "lazy_static", "libp2p-core", @@ -3348,7 +3393,7 @@ checksum = "85e9b544335d1ed30af71daa96edbefadef6f19c7a55f078b9fc92c87163105d" dependencies = [ "asynchronous-codec 0.6.0", "bytes 1.0.1", - "futures 0.3.13", + "futures 0.3.15", "libp2p-core", "log", "nohash-hasher", @@ -3366,7 +3411,7 @@ checksum = "36db0f0db3b0433f5b9463f1c0cd9eadc0a3734a9170439ce501ff99733a88bd" dependencies = [ "bytes 1.0.1", "curve25519-dalek 3.0.2", - "futures 0.3.13", + "futures 0.3.15", "lazy_static", "libp2p-core", "log", @@ -3386,7 +3431,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf4bfaffac63bf3c7ec11ed9d8879d455966ddea7e78ee14737f0b6dce0d1cd1" dependencies = [ - "futures 0.3.13", + "futures 0.3.15", "libp2p-core", "libp2p-swarm", "log", @@ -3403,7 +3448,7 @@ checksum = "0c8c37b4d2a075b4be8442760a5f8c037180f0c8dd5b5734b9978ab868b3aa11" dependencies = [ "asynchronous-codec 0.6.0", "bytes 1.0.1", - "futures 0.3.13", + "futures 0.3.15", 
"libp2p-core", "log", "prost", @@ -3418,7 +3463,7 @@ version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ce3374f3b28162db9d3442c9347c4f14cb01e8290052615c7d341d40eae0599" dependencies = [ - "futures 0.3.13", + "futures 0.3.15", "log", "pin-project 1.0.5", "rand 0.7.3", @@ -3434,7 +3479,7 @@ checksum = "0b8786aca3f18671d8776289706a5521f6c9124a820f69e358de214b9939440d" dependencies = [ "asynchronous-codec 0.6.0", "bytes 1.0.1", - "futures 0.3.13", + "futures 0.3.15", "futures-timer 3.0.2", "libp2p-core", "libp2p-swarm", @@ -3457,7 +3502,7 @@ checksum = "1cdbe172f08e6d0f95fa8634e273d4c4268c4063de2e33e7435194b0130c62e3" dependencies = [ "async-trait", "bytes 1.0.1", - "futures 0.3.13", + "futures 0.3.15", "libp2p-core", "libp2p-swarm", "log", @@ -3476,7 +3521,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e04d8e1eef675029ec728ba14e8d0da7975d84b6679b699b4ae91a1de9c3a92" dependencies = [ "either", - "futures 0.3.13", + "futures 0.3.15", "libp2p-core", "log", "rand 0.7.3", @@ -3502,7 +3547,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b1a27d21c477951799e99d5c105d78868258502ce092988040a808d5a19bbd9" dependencies = [ "async-io", - "futures 0.3.13", + "futures 0.3.15", "futures-timer 3.0.2", "if-watch", "ipnet", @@ -3519,7 +3564,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffd6564bb3b7ff203661ccbb69003c2b551e34cef974f2d6c6a28306a12170b5" dependencies = [ "async-std", - "futures 0.3.13", + "futures 0.3.15", "libp2p-core", "log", ] @@ -3530,7 +3575,7 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cef45d61e43c313531b5e903e4e8415212ff6338e0c54c47da5b9b412b5760de" dependencies = [ - "futures 0.3.13", + "futures 0.3.15", "js-sys", "libp2p-core", "parity-send-wrapper", @@ -3545,13 +3590,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"cace60995ef6f637e4752cccbb2590f6bc358e8741a0d066307636c69a4b3a74" dependencies = [ "either", - "futures 0.3.13", + "futures 0.3.15", "futures-rustls", "libp2p-core", "log", "quicksink", "rw-stream-sink", - "soketto", + "soketto 0.4.2", "url 2.2.1", "webpki-roots", ] @@ -3562,7 +3607,7 @@ version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f35da42cfc6d5cb0dcf3ad6881bc68d146cdf38f98655e09e33fbba4d13eabc4" dependencies = [ - "futures 0.3.13", + "futures 0.3.15", "libp2p-core", "parking_lot 0.11.1", "thiserror", @@ -4010,7 +4055,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d91ec0a2440aaff5f78ec35631a7027d50386c6163aa975f7caa0d5da4b6ff8" dependencies = [ "bytes 1.0.1", - "futures 0.3.13", + "futures 0.3.15", "log", "pin-project 1.0.5", "smallvec 1.6.1", @@ -4083,7 +4128,7 @@ version = "0.8.0" dependencies = [ "derive_more", "fs_extra", - "futures 0.3.13", + "futures 0.3.15", "hash-db", "hex", "kvdb", @@ -4119,7 +4164,7 @@ dependencies = [ name = "node-browser-testing" version = "2.0.0" dependencies = [ - "futures 0.3.13", + "futures 0.3.15", "futures-timer 3.0.2", "jsonrpc-core", "libp2p", @@ -4141,7 +4186,7 @@ dependencies = [ "frame-benchmarking-cli", "frame-support", "frame-system", - "futures 0.3.13", + "futures 0.3.15", "hex-literal", "libp2p-wasm-ext", "log", @@ -4189,7 +4234,7 @@ dependencies = [ "sc-transaction-pool", "serde", "serde_json", - "soketto", + "soketto 0.4.2", "sp-authority-discovery", "sp-authorship", "sp-consensus", @@ -4222,7 +4267,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "futures 0.3.13", + "futures 0.3.15", "node-primitives", "node-runtime", "node-testing", @@ -4484,7 +4529,7 @@ dependencies = [ "frame-support", "frame-system", "fs_extra", - "futures 0.3.13", + "futures 0.3.15", "log", "node-executor", "node-primitives", @@ -6746,7 +6791,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "tokio 1.6.0", + "tokio 0.2.25", 
] [[package]] @@ -6858,20 +6903,20 @@ dependencies = [ "log", "ring", "sct", - "webpki 0.21.4", + "webpki", ] [[package]] name = "rustls" -version = "0.19.0" +version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" +checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" dependencies = [ "base64 0.13.0", "log", "ring", "sct", - "webpki 0.21.4", + "webpki", ] [[package]] @@ -6883,7 +6928,19 @@ dependencies = [ "openssl-probe", "rustls 0.18.1", "schannel", - "security-framework", + "security-framework 1.0.0", +] + +[[package]] +name = "rustls-native-certs" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a07b7c1885bd8ed3831c289b7870b13ef46fe0e856d288c30d9cc17d75a2092" +dependencies = [ + "openssl-probe", + "rustls 0.19.1", + "schannel", + "security-framework 2.3.0", ] [[package]] @@ -6908,7 +6965,7 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020" dependencies = [ - "futures 0.3.13", + "futures 0.3.15", "pin-project 0.4.27", "static_assertions", ] @@ -6953,7 +7010,7 @@ dependencies = [ "async-trait", "derive_more", "either", - "futures 0.3.13", + "futures 0.3.15", "futures-timer 3.0.2", "ip_network", "libp2p", @@ -6982,7 +7039,7 @@ dependencies = [ name = "sc-basic-authorship" version = "0.9.0" dependencies = [ - "futures 0.3.13", + "futures 0.3.15", "futures-timer 3.0.2", "log", "parity-scale-codec", @@ -7056,7 +7113,7 @@ version = "0.9.0" dependencies = [ "chrono", "fdlimit", - "futures 0.3.13", + "futures 0.3.15", "hex", "libp2p", "log", @@ -7094,7 +7151,7 @@ version = "3.0.0" dependencies = [ "derive_more", "fnv", - "futures 0.3.13", + "futures 0.3.15", "hash-db", "kvdb", "kvdb-memorydb", @@ -7176,7 +7233,7 @@ version = "0.9.0" dependencies = [ "async-trait", "derive_more", - 
"futures 0.3.13", + "futures 0.3.15", "futures-timer 3.0.2", "getrandom 0.2.3", "log", @@ -7219,7 +7276,7 @@ dependencies = [ "async-trait", "derive_more", "fork-tree", - "futures 0.3.13", + "futures 0.3.15", "futures-timer 3.0.2", "log", "merlin", @@ -7273,7 +7330,7 @@ name = "sc-consensus-babe-rpc" version = "0.9.0" dependencies = [ "derive_more", - "futures 0.3.13", + "futures 0.3.15", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", @@ -7316,7 +7373,7 @@ dependencies = [ "assert_matches", "async-trait", "derive_more", - "futures 0.3.13", + "futures 0.3.15", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", @@ -7354,7 +7411,7 @@ version = "0.9.0" dependencies = [ "async-trait", "derive_more", - "futures 0.3.13", + "futures 0.3.15", "futures-timer 3.0.2", "log", "parity-scale-codec", @@ -7376,7 +7433,7 @@ name = "sc-consensus-slots" version = "0.9.0" dependencies = [ "async-trait", - "futures 0.3.13", + "futures 0.3.15", "futures-timer 3.0.2", "impl-trait-for-tuples", "log", @@ -7506,7 +7563,7 @@ dependencies = [ "dyn-clone", "finality-grandpa", "fork-tree", - "futures 0.3.13", + "futures 0.3.15", "futures-timer 3.0.2", "linked-hash-map", "log", @@ -7551,7 +7608,7 @@ version = "0.9.0" dependencies = [ "derive_more", "finality-grandpa", - "futures 0.3.13", + "futures 0.3.15", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", @@ -7581,7 +7638,7 @@ version = "0.9.0" dependencies = [ "derive_more", "finality-grandpa", - "futures 0.3.13", + "futures 0.3.15", "log", "num-traits", "parity-scale-codec", @@ -7606,7 +7663,7 @@ name = "sc-informant" version = "0.9.0" dependencies = [ "ansi_term 0.12.1", - "futures 0.3.13", + "futures 0.3.15", "futures-timer 3.0.2", "log", "parity-util-mem", @@ -7624,7 +7681,7 @@ version = "3.0.0" dependencies = [ "async-trait", "derive_more", - "futures 0.3.13", + "futures 0.3.15", "futures-util", "hex", "merlin", @@ -7673,7 +7730,7 @@ dependencies = [ "erased-serde", "fnv", "fork-tree", - "futures 0.3.13", + "futures 
0.3.15", "futures-timer 3.0.2", "hex", "ip_network", @@ -7721,7 +7778,7 @@ name = "sc-network-gossip" version = "0.9.0" dependencies = [ "async-std", - "futures 0.3.13", + "futures 0.3.15", "futures-timer 3.0.2", "libp2p", "log", @@ -7742,7 +7799,7 @@ version = "0.8.0" dependencies = [ "async-std", "async-trait", - "futures 0.3.13", + "futures 0.3.15", "futures-timer 3.0.2", "libp2p", "log", @@ -7770,7 +7827,7 @@ version = "3.0.0" dependencies = [ "bytes 0.5.6", "fnv", - "futures 0.3.13", + "futures 0.3.15", "futures-timer 3.0.2", "hex", "hyper 0.13.10", @@ -7804,7 +7861,7 @@ dependencies = [ name = "sc-peerset" version = "3.0.0" dependencies = [ - "futures 0.3.13", + "futures 0.3.15", "libp2p", "log", "rand 0.7.3", @@ -7827,7 +7884,7 @@ version = "3.0.0" dependencies = [ "assert_matches", "futures 0.1.31", - "futures 0.3.13", + "futures 0.3.15", "hash-db", "jsonrpc-core", "jsonrpc-pubsub", @@ -7869,7 +7926,7 @@ name = "sc-rpc-api" version = "0.9.0" dependencies = [ "derive_more", - "futures 0.3.13", + "futures 0.3.15", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", @@ -7928,7 +7985,7 @@ dependencies = [ "directories", "exit-future", "futures 0.1.31", - "futures 0.3.13", + "futures 0.3.15", "futures-timer 3.0.2", "hash-db", "jsonrpc-core", @@ -7996,7 +8053,7 @@ version = "2.0.0" dependencies = [ "fdlimit", "futures 0.1.31", - "futures 0.3.13", + "futures 0.3.15", "hex-literal", "log", "parity-scale-codec", @@ -8064,7 +8121,7 @@ name = "sc-telemetry" version = "3.0.0" dependencies = [ "chrono", - "futures 0.3.13", + "futures 0.3.15", "libp2p", "log", "parking_lot 0.11.1", @@ -8131,7 +8188,7 @@ dependencies = [ "assert_matches", "criterion", "derive_more", - "futures 0.3.13", + "futures 0.3.15", "linked-hash-map", "log", "parity-scale-codec", @@ -8154,7 +8211,7 @@ name = "sc-transaction-pool" version = "3.0.0" dependencies = [ "assert_matches", - "futures 0.3.13", + "futures 0.3.15", "futures-diagnose", "hex", "intervalier", @@ -8268,10 +8325,23 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "ad502866817f0575705bd7be36e2b2535cc33262d493aa733a2ec862baa2bc2b" dependencies = [ "bitflags", - "core-foundation", - "core-foundation-sys", + "core-foundation 0.7.0", + "core-foundation-sys 0.7.0", "libc", - "security-framework-sys", + "security-framework-sys 1.0.0", +] + +[[package]] +name = "security-framework" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b239a3d5db51252f6f48f42172c65317f37202f4a21021bf5f9d40a408f4592c" +dependencies = [ + "bitflags", + "core-foundation 0.9.1", + "core-foundation-sys 0.8.2", + "libc", + "security-framework-sys 2.3.0", ] [[package]] @@ -8280,7 +8350,17 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51ceb04988b17b6d1dcd555390fa822ca5637b4a14e1f5099f13d351bed4d6c7" dependencies = [ - "core-foundation-sys", + "core-foundation-sys 0.7.0", + "libc", +] + +[[package]] +name = "security-framework-sys" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e4effb91b4b8b6fb7732e670b6cee160278ff8e6bf485c7805d9e319d76e284" +dependencies = [ + "core-foundation-sys 0.8.2", "libc", ] @@ -8572,13 +8652,28 @@ dependencies = [ "base64 0.12.3", "bytes 0.5.6", "flate2", - "futures 0.3.13", + "futures 0.3.15", "httparse", "log", "rand 0.7.3", "sha-1 0.9.4", ] +[[package]] +name = "soketto" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4919971d141dbadaa0e82b5d369e2d7666c98e4625046140615ca363e50d4daa" +dependencies = [ + "base64 0.13.0", + "bytes 1.0.1", + "futures 0.3.15", + "httparse", + "log", + "rand 0.8.3", + "sha-1 0.9.4", +] + [[package]] name = "sp-allocator" version = "3.0.0" @@ -8728,7 +8823,7 @@ dependencies = [ name = "sp-blockchain" version = "3.0.0" dependencies = [ - "futures 0.3.13", + "futures 0.3.15", "log", "lru", "parity-scale-codec", @@ -8754,7 +8849,7 @@ name = 
"sp-consensus" version = "0.9.0" dependencies = [ "async-trait", - "futures 0.3.13", + "futures 0.3.15", "futures-timer 3.0.2", "libp2p", "log", @@ -8854,7 +8949,7 @@ dependencies = [ "criterion", "dyn-clonable", "ed25519-dalek", - "futures 0.3.13", + "futures 0.3.15", "hash-db", "hash256-std-hasher", "hex", @@ -8942,7 +9037,7 @@ name = "sp-inherents" version = "3.0.0" dependencies = [ "async-trait", - "futures 0.3.13", + "futures 0.3.15", "impl-trait-for-tuples", "parity-scale-codec", "sp-core", @@ -8955,7 +9050,7 @@ dependencies = [ name = "sp-io" version = "3.0.0" dependencies = [ - "futures 0.3.13", + "futures 0.3.15", "hash-db", "libsecp256k1", "log", @@ -8991,7 +9086,7 @@ version = "0.9.0" dependencies = [ "async-trait", "derive_more", - "futures 0.3.13", + "futures 0.3.15", "merlin", "parity-scale-codec", "parking_lot 0.11.1", @@ -9327,7 +9422,7 @@ name = "sp-transaction-pool" version = "3.0.0" dependencies = [ "derive_more", - "futures 0.3.13", + "futures 0.3.15", "log", "parity-scale-codec", "serde", @@ -9373,7 +9468,7 @@ dependencies = [ name = "sp-utils" version = "3.0.0" dependencies = [ - "futures 0.3.13", + "futures 0.3.15", "futures-core", "futures-timer 3.0.2", "lazy_static", @@ -9539,7 +9634,7 @@ dependencies = [ "chrono", "console_error_panic_hook", "futures 0.1.31", - "futures 0.3.13", + "futures 0.3.15", "futures-timer 3.0.2", "getrandom 0.2.3", "js-sys", @@ -9582,7 +9677,7 @@ version = "3.0.0" dependencies = [ "frame-support", "frame-system", - "futures 0.3.13", + "futures 0.3.15", "jsonrpc-client-transports", "jsonrpc-core", "parity-scale-codec", @@ -9597,7 +9692,7 @@ name = "substrate-frame-rpc-system" version = "3.0.0" dependencies = [ "frame-system-rpc-runtime-api", - "futures 0.3.13", + "futures 0.3.15", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", @@ -9636,7 +9731,7 @@ version = "2.0.1" dependencies = [ "async-trait", "futures 0.1.31", - "futures 0.3.13", + "futures 0.3.15", "hash-db", "hex", "parity-scale-codec", @@ -9666,7 
+9761,7 @@ dependencies = [ "frame-support", "frame-system", "frame-system-rpc-runtime-api", - "futures 0.3.13", + "futures 0.3.15", "log", "memory-db", "pallet-babe", @@ -9706,7 +9801,7 @@ dependencies = [ name = "substrate-test-runtime-client" version = "2.0.0" dependencies = [ - "futures 0.3.13", + "futures 0.3.15", "parity-scale-codec", "sc-block-builder", "sc-client-api", @@ -9727,7 +9822,7 @@ name = "substrate-test-runtime-transaction-pool" version = "2.0.0" dependencies = [ "derive_more", - "futures 0.3.13", + "futures 0.3.15", "parity-scale-codec", "parking_lot 0.11.1", "sc-transaction-graph", @@ -9741,7 +9836,7 @@ dependencies = [ name = "substrate-test-utils" version = "3.0.0" dependencies = [ - "futures 0.3.13", + "futures 0.3.15", "sc-service", "substrate-test-utils-derive", "tokio 0.2.25", @@ -9863,7 +9958,7 @@ version = "0.9.0" dependencies = [ "env_logger 0.7.1", "frame-system", - "futures 0.3.13", + "futures 0.3.15", "jsonrpc-core", "log", "sc-basic-authorship", @@ -10085,7 +10180,7 @@ dependencies = [ "pin-project-lite 0.1.12", "signal-hook-registry", "slab", - "tokio-macros 0.2.6", + "tokio-macros", "winapi 0.3.9", ] @@ -10097,7 +10192,6 @@ checksum = "bd3076b5c8cc18138b8f8814895c11eb4de37114a5d127bafdc5e55798ceef37" dependencies = [ "autocfg", "pin-project-lite 0.2.6", - "tokio-macros 1.2.0", ] [[package]] @@ -10175,17 +10269,6 @@ dependencies = [ "syn", ] -[[package]] -name = "tokio-macros" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c49e3df43841dafb86046472506755d8501c5615673955f6aa17181125d13c37" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "tokio-named-pipes" version = "0.1.0" @@ -10227,7 +10310,19 @@ dependencies = [ "futures-core", "rustls 0.18.1", "tokio 0.2.25", - "webpki 0.21.4", + "webpki", +] + +[[package]] +name = "tokio-rustls" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"03d15e5669243a45f630a5167d101b942174ca94b615445b2057eace1c818736" +dependencies = [ + "futures-core", + "rustls 0.19.1", + "tokio 0.2.25", + "webpki", ] [[package]] @@ -10333,6 +10428,7 @@ checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" dependencies = [ "bytes 0.5.6", "futures-core", + "futures-io", "futures-sink", "log", "pin-project-lite 0.1.12", @@ -10935,7 +11031,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be0ecb0db480561e9a7642b5d3e4187c128914e58aa84330b9493e3eb68c5e7f" dependencies = [ - "futures 0.3.13", + "futures 0.3.15", "js-sys", "parking_lot 0.11.1", "pin-utils", @@ -11219,23 +11315,13 @@ dependencies = [ "untrusted", ] -[[package]] -name = "webpki" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "webpki-roots" version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82015b7e0b8bad8185994674a13a93306bea76cf5a16c5a181382fd3a5ec2376" dependencies = [ - "webpki 0.21.4", + "webpki", ] [[package]] @@ -11348,7 +11434,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7d9028f208dd5e63c614be69f115c1b53cacc1111437d4c765185856666c107" dependencies = [ - "futures 0.3.13", + "futures 0.3.15", "log", "nohash-hasher", "parking_lot 0.11.1", diff --git a/utils/frame/remote-externalities/Cargo.toml b/utils/frame/remote-externalities/Cargo.toml index 4fe0cf979c1b4..8f62d977baedd 100644 --- a/utils/frame/remote-externalities/Cargo.toml +++ b/utils/frame/remote-externalities/Cargo.toml @@ -13,8 +13,8 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee-ws-client = { version = "=0.2.0-alpha.6", default-features = false } -jsonrpsee-proc-macros = "=0.2.0-alpha.6" +jsonrpsee-ws-client = 
{ version = "0.2.0", default-features = false, features = ["tokio02"] } +jsonrpsee-proc-macros = "0.2.0" hex = "0.4.0" env_logger = "0.8.2" @@ -28,7 +28,7 @@ sp-core = { version = "3.0.0", path = "../../../primitives/core" } sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } [dev-dependencies] -tokio = { version = "1.6.0", features = ["macros", "rt"] } +tokio = { version = "0.2", features = ["macros", "rt-threaded"] } pallet-elections-phragmen = { path = "../../../frame/elections-phragmen", version = "4.0.0" } frame-support = { path = "../../../frame/support", version = "3.0.0" } From b493dd3fa5d7f07c369562004870046a53a3f3c8 Mon Sep 17 00:00:00 2001 From: Zeke Mostov <32168567+emostov@users.noreply.github.com> Date: Tue, 8 Jun 2021 10:26:08 -0700 Subject: [PATCH 18/61] Small doc updates to `election-provider-multi-phase` (#9041) * Small doc updates to election-provider-multi-phase * small change * Improve challenge phase docs * An honest --- .../election-provider-multi-phase/src/lib.rs | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 9bec5cc4bd310..a4ca89a417e0f 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -45,6 +45,7 @@ //! Each of the phases can be disabled by essentially setting their length to zero. If both phases //! have length zero, then the pallet essentially runs only the fallback strategy, denoted by //! [`Config::Fallback`]. +//! //! ### Signed Phase //! //! In the signed phase, solutions (of type [`RawSolution`]) are submitted and queued on chain. A @@ -158,15 +159,15 @@ //! //! ## Future Plans //! -//! **Challenge Phase**. We plan adding a third phase to the pallet, called the challenge phase. -//! This is phase in which no further solutions are processed, and the current best solution might +//! **Challenge Phase**. 
We plan on adding a third phase to the pallet, called the challenge phase. +//! This is a phase in which no further solutions are processed, and the current best solution might //! be challenged by anyone (signed or unsigned). The main plan here is to enforce the solution to //! be PJR. Checking PJR on-chain is quite expensive, yet proving that a solution is **not** PJR is -//! rather cheap. If a queued solution is challenged: +//! rather cheap. If a queued solution is successfully proven bad: //! //! 1. We must surely slash whoever submitted that solution (might be a challenge for unsigned //! solutions). -//! 2. It is probably fine to fallback to the on-chain election, as we expect this to happen rarely. +//! 2. We will fallback to the emergency strategy (likely extending the current era). //! //! **Bailing out**. The functionality of bailing out of a queued solution is nice. A miner can //! submit a solution as soon as they _think_ it is high probability feasible, and do the checks @@ -174,11 +175,11 @@ //! portion of the bond). //! //! **Conditionally open unsigned phase**: Currently, the unsigned phase is always opened. This is -//! useful because an honest validation will run our OCW code, which should be good enough to trump -//! a mediocre or malicious signed submission (assuming in the absence of honest signed bots). If an -//! when the signed submissions are checked against an absolute measure (e.g. PJR), then we can only -//! open the unsigned phase in extreme conditions (i.e. "not good signed solution received") to -//! spare some work in the validators +//! useful because an honest validator will run substrate OCW code, which should be good enough to trump +//! a mediocre or malicious signed submission (assuming in the absence of honest signed bots). +//! If there are signed submissions, they can be checked against an absolute measure (e.g. PJR), +//! then we can only open the unsigned phase in extreme conditions (i.e. "no good signed solution +//! 
received") to spare some work for the active validators. //! //! **Allow smaller solutions and build up**: For now we only allow solutions that are exactly //! [`DesiredTargets`], no more, no less. Over time, we can change this to a [min, max] where any From f775d0de6bddeba53ab35bc662fb076291e5260e Mon Sep 17 00:00:00 2001 From: David Date: Tue, 8 Jun 2021 20:10:16 +0200 Subject: [PATCH 19/61] Misc telemetry polish (#8484) * Remove TelemetryWorker::with_transport Make logging more useful * Re-instate TelemetryWorker::with_transport * Fix typo, don't spam --- client/telemetry/src/lib.rs | 22 +++----------------- client/telemetry/src/node.rs | 40 ++++++++++++++++++++---------------- 2 files changed, 25 insertions(+), 37 deletions(-) diff --git a/client/telemetry/src/lib.rs b/client/telemetry/src/lib.rs index 06c82d44ab381..842d89d7edf07 100644 --- a/client/telemetry/src/lib.rs +++ b/client/telemetry/src/lib.rs @@ -122,21 +122,11 @@ impl TelemetryWorker { /// /// Only one is needed per process. pub fn new(buffer_size: usize) -> Result { - let transport = initialize_transport(None)?; - let (message_sender, message_receiver) = mpsc::channel(buffer_size); - let (register_sender, register_receiver) = mpsc::unbounded(); - - Ok(Self { - message_receiver, - message_sender, - register_receiver, - register_sender, - id_counter: Arc::new(atomic::AtomicU64::new(1)), - transport, - }) + Self::with_transport(buffer_size, None) } - /// Instantiate a new [`TelemetryWorker`] which can run in background. + /// Instantiate a new [`TelemetryWorker`] with the given [`ExtTransport`] + /// which can run in background. /// /// Only one is needed per process. 
pub fn with_transport(buffer_size: usize, transport: Option) -> Result { @@ -312,12 +302,6 @@ impl TelemetryWorker { for (node_max_verbosity, addr) in nodes { if verbosity > *node_max_verbosity { - log::trace!( - target: "telemetry", - "Skipping {} for log entry with verbosity {:?}", - addr, - verbosity, - ); continue; } diff --git a/client/telemetry/src/node.rs b/client/telemetry/src/node.rs index 2d1a04b00a4cd..9ac7ada4e5d66 100644 --- a/client/telemetry/src/node.rs +++ b/client/telemetry/src/node.rs @@ -73,8 +73,9 @@ enum NodeSocket { impl NodeSocket { fn wait_reconnect() -> NodeSocket { - let random_delay = rand::thread_rng().gen_range(5, 10); + let random_delay = rand::thread_rng().gen_range(10, 20); let delay = Delay::new(Duration::from_secs(random_delay)); + log::trace!(target: "telemetry", "Pausing for {} secs before reconnecting", random_delay); NodeSocket::WaitingReconnect(delay) } } @@ -214,11 +215,11 @@ where }, NodeSocket::ReconnectNow => match self.transport.clone().dial(self.addr.clone()) { Ok(d) => { - log::debug!(target: "telemetry", "Started dialing {}", self.addr); + log::trace!(target: "telemetry", "Re-dialing {}", self.addr); socket = NodeSocket::Dialing(d); } Err(err) => { - log::warn!(target: "telemetry", "❌ Error while dialing {}: {:?}", self.addr, err); + log::warn!(target: "telemetry", "❌ Error while re-dialing {}: {:?}", self.addr, err); socket = NodeSocket::wait_reconnect(); } }, @@ -236,16 +237,18 @@ where } }; - // The Dispatcher blocks when the Node sinks blocks. This is why it is important that the - // Node sinks doesn't go into "Pending" state while waiting for reconnection but rather + // The Dispatcher blocks when the Node syncs blocks. This is why it is important that the + // Node sinks don't go into "Pending" state while waiting for reconnection but rather // discard the excess of telemetry messages. 
Poll::Ready(Ok(())) } fn start_send(mut self: Pin<&mut Self>, item: TelemetryPayload) -> Result<(), Self::Error> { + // Any buffered outgoing telemetry messages are discarded while (re-)connecting. match &mut self.socket { NodeSocket::Connected(conn) => match serde_json::to_vec(&item) { Ok(data) => { + log::trace!(target: "telemetry", "Sending {} bytes", data.len()); let _ = conn.sink.start_send_unpin(data); } Err(err) => log::debug!( @@ -254,18 +257,14 @@ where err, ), }, - _socket => { - log::trace!( - target: "telemetry", - "Message has been discarded: {}", - serde_json::to_string(&item) - .unwrap_or_else(|err| format!( - "could not be serialized ({}): {:?}", - err, - item, - )), - ); - } + // We are currently dialing the node. + NodeSocket::Dialing(_) => log::trace!(target: "telemetry", "Dialing"), + // A new connection should be started as soon as possible. + NodeSocket::ReconnectNow => log::trace!(target: "telemetry", "Reconnecting"), + // Waiting before attempting to dial again. + NodeSocket::WaitingReconnect(_) => {} + // Temporary transition state. + NodeSocket::Poisoned => log::trace!(target: "telemetry", "Poisoned"), } Ok(()) } @@ -273,7 +272,12 @@ where fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { match &mut self.socket { NodeSocket::Connected(conn) => match conn.sink.poll_flush_unpin(cx) { - Poll::Ready(Err(_)) => { + Poll::Ready(Err(e)) => { + // When `telemetry` closes the websocket connection we end + // up here, which is sub-optimal. See + // https://github.com/libp2p/rust-libp2p/issues/2021 for + // what we could do to improve this. 
+ log::trace!(target: "telemetry", "[poll_flush] Error: {:?}", e); self.socket = NodeSocket::wait_reconnect(); Poll::Ready(Ok(())) } From 6749c701900747accaa35eaf2101586f09baef9e Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Tue, 8 Jun 2021 22:54:06 +0200 Subject: [PATCH 20/61] put the validate_unsigned implementation inside the pallet definition (#9044) Co-authored-by: Shawn Tabrizi --- frame/babe/src/equivocation.rs | 14 +++++++------- frame/babe/src/lib.rs | 12 ++++++++++++ frame/grandpa/src/equivocation.rs | 14 +++++++------- frame/grandpa/src/lib.rs | 12 ++++++++++++ 4 files changed, 38 insertions(+), 14 deletions(-) diff --git a/frame/babe/src/equivocation.rs b/frame/babe/src/equivocation.rs index 0fd74882c1b75..e9017205c6b58 100644 --- a/frame/babe/src/equivocation.rs +++ b/frame/babe/src/equivocation.rs @@ -179,12 +179,12 @@ where } } -/// A `ValidateUnsigned` implementation that restricts calls to `report_equivocation_unsigned` -/// to local calls (i.e. extrinsics generated on this node) or that already in a block. This -/// guarantees that only block authors can include unsigned equivocation reports. -impl frame_support::unsigned::ValidateUnsigned for Pallet { - type Call = Call; - fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { +/// Methods for the `ValidateUnsigned` implementation: +/// It restricts calls to `report_equivocation_unsigned` to local calls (i.e. extrinsics generated +/// on this node) or that already in a block. This guarantees that only block authors can include +/// unsigned equivocation reports. 
+impl Pallet { + pub fn validate_unsigned(source: TransactionSource, call: &Call) -> TransactionValidity { if let Call::report_equivocation_unsigned(equivocation_proof, key_owner_proof) = call { // discard equivocation report not coming from the local node match source { @@ -221,7 +221,7 @@ impl frame_support::unsigned::ValidateUnsigned for Pallet { } } - fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { + pub fn pre_dispatch(call: &Call) -> Result<(), TransactionValidityError> { if let Call::report_equivocation_unsigned(equivocation_proof, key_owner_proof) = call { is_known_offence::(equivocation_proof, key_owner_proof) } else { diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index 6eecf2675291c..a0a9e01eaa26c 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -407,6 +407,18 @@ pub mod pallet { Ok(()) } } + + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { + Self::validate_unsigned(source, call) + } + + fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { + Self::pre_dispatch(call) + } + } } /// A BABE public key diff --git a/frame/grandpa/src/equivocation.rs b/frame/grandpa/src/equivocation.rs index 24f56247d30ef..0383d2d9a9be6 100644 --- a/frame/grandpa/src/equivocation.rs +++ b/frame/grandpa/src/equivocation.rs @@ -200,12 +200,12 @@ pub struct GrandpaTimeSlot { pub round: RoundNumber, } -/// A `ValidateUnsigned` implementation that restricts calls to `report_equivocation_unsigned` -/// to local calls (i.e. extrinsics generated on this node) or that already in a block. This -/// guarantees that only block authors can include unsigned equivocation reports. 
-impl frame_support::unsigned::ValidateUnsigned for Pallet { - type Call = Call; - fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { +/// Methods for the `ValidateUnsigned` implementation: +/// It restricts calls to `report_equivocation_unsigned` to local calls (i.e. extrinsics generated +/// on this node) or that already in a block. This guarantees that only block authors can include +/// unsigned equivocation reports. +impl Pallet { + pub fn validate_unsigned(source: TransactionSource, call: &Call) -> TransactionValidity { if let Call::report_equivocation_unsigned(equivocation_proof, key_owner_proof) = call { // discard equivocation report not coming from the local node match source { @@ -243,7 +243,7 @@ impl frame_support::unsigned::ValidateUnsigned for Pallet { } } - fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { + pub fn pre_dispatch(call: &Call) -> Result<(), TransactionValidityError> { if let Call::report_equivocation_unsigned(equivocation_proof, key_owner_proof) = call { is_known_offence::(equivocation_proof, key_owner_proof) } else { diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs index f6edb07ccc6b6..952e0d646135b 100644 --- a/frame/grandpa/src/lib.rs +++ b/frame/grandpa/src/lib.rs @@ -341,6 +341,18 @@ pub mod pallet { Pallet::::initialize(&self.authorities) } } + + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { + Self::validate_unsigned(source, call) + } + + fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { + Self::pre_dispatch(call) + } + } } pub trait WeightInfo { From d6ac9f551b71d9c7b69afcebfc68ace310ef74ee Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Wed, 9 Jun 2021 02:31:29 -0700 Subject: [PATCH 21/61] Use pallet names to name enum variants (#8990) * Remove unused event_metadata variable * Eliminate 
mod_name and use pallet names to name enum variants * Rename field name `pallet` to `path` * Use only the pallet name to generate variant names * Use new naming scheme for Event enum in sudo pallet tests * Use new naming scheme for Event enum in offences pallet tests * Use new naming scheme for Event enum in contracts pallet tests * Use new naming scheme for Event enum in collective pallet tests * Use new naming scheme for Event enum in bounties pallet tests * Use new naming scheme for Event enum in balances pallet tests * Use new naming scheme for Event enum in assets pallet tests * Use new naming scheme for Event enum in frame support tests * Use new naming scheme for Event enum in tips pallet tests * Use new naming scheme for Event enum in transaction payment pallet tests * Use new naming scheme for GenesisConfig fields in example pallet tests * Use new naming scheme for GenesisConfig fields in elections pallet tests * Use new naming scheme for Event enum in election provider multi-phase pallet tests * Use new naming scheme for Event enum in elections phragmen pallet tests * Use new naming scheme for GenesisConfig fields in chain spec * Use new naming scheme for Event enum in staking pallet mock * Use new naming scheme for GenesisConfig fields in node-template chain spec * Use new naming scheme for GenesisConfig fields in node-testing chain spec * Use new naming scheme for Event enum in node executor tests * Use new naming scheme for GenesisConfig fields in transaction storage pallet mock * Refactor match conditions --- bin/node-template/node/src/chain_spec.rs | 10 +-- bin/node/cli/src/chain_spec.rs | 40 +++++----- bin/node/executor/tests/basic.rs | 22 +++--- bin/node/testing/src/genesis.rs | 40 +++++----- frame/assets/src/tests.rs | 2 +- frame/balances/src/tests.rs | 26 +++---- frame/balances/src/tests_local.rs | 10 +-- frame/balances/src/tests_reentrancy.rs | 12 +-- frame/bounties/src/tests.rs | 2 +- frame/collective/src/lib.rs | 74 +++++++++---------- 
frame/contracts/src/exec.rs | 2 +- frame/contracts/src/tests.rs | 68 ++++++++--------- .../election-provider-multi-phase/src/mock.rs | 2 +- frame/elections-phragmen/src/lib.rs | 14 ++-- frame/elections/src/mock.rs | 2 +- frame/example/src/tests.rs | 6 +- frame/offences/src/tests.rs | 4 +- frame/staking/src/mock.rs | 2 +- frame/sudo/src/tests.rs | 10 +-- .../src/construct_runtime/expand/config.rs | 14 ++-- .../src/construct_runtime/expand/event.rs | 61 ++++++--------- .../src/construct_runtime/expand/metadata.rs | 10 +-- .../src/construct_runtime/expand/origin.rs | 62 +++++++--------- .../procedural/src/construct_runtime/mod.rs | 8 +- .../procedural/src/construct_runtime/parse.rs | 30 +------- frame/support/test/tests/construct_runtime.rs | 18 ++--- frame/support/test/tests/instance.rs | 12 +-- frame/support/test/tests/pallet.rs | 10 +-- frame/support/test/tests/pallet_instance.rs | 16 ++-- frame/tips/src/tests.rs | 2 +- frame/transaction-payment/src/lib.rs | 4 +- frame/transaction-storage/src/mock.rs | 6 +- 32 files changed, 274 insertions(+), 327 deletions(-) diff --git a/bin/node-template/node/src/chain_spec.rs b/bin/node-template/node/src/chain_spec.rs index f7ed87251391e..5093a77b571e9 100644 --- a/bin/node-template/node/src/chain_spec.rs +++ b/bin/node-template/node/src/chain_spec.rs @@ -134,22 +134,22 @@ fn testnet_genesis( _enable_println: bool, ) -> GenesisConfig { GenesisConfig { - frame_system: SystemConfig { + system: SystemConfig { // Add Wasm runtime to storage. code: wasm_binary.to_vec(), changes_trie_config: Default::default(), }, - pallet_balances: BalancesConfig { + balances: BalancesConfig { // Configure endowed accounts with initial balance of 1 << 60. 
balances: endowed_accounts.iter().cloned().map(|k|(k, 1 << 60)).collect(), }, - pallet_aura: AuraConfig { + aura: AuraConfig { authorities: initial_authorities.iter().map(|x| (x.0.clone())).collect(), }, - pallet_grandpa: GrandpaConfig { + grandpa: GrandpaConfig { authorities: initial_authorities.iter().map(|x| (x.1.clone(), 1)).collect(), }, - pallet_sudo: SudoConfig { + sudo: SudoConfig { // Assign network admin rights. key: root_key, }, diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs index 3454aa83c24d4..e3ba16b9de6f3 100644 --- a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -261,19 +261,19 @@ pub fn testnet_genesis( const STASH: Balance = ENDOWMENT / 1000; GenesisConfig { - frame_system: SystemConfig { + system: SystemConfig { code: wasm_binary_unwrap().to_vec(), changes_trie_config: Default::default(), }, - pallet_balances: BalancesConfig { + balances: BalancesConfig { balances: endowed_accounts.iter().cloned() .map(|x| (x, ENDOWMENT)) .collect() }, - pallet_indices: IndicesConfig { + indices: IndicesConfig { indices: vec![], }, - pallet_session: SessionConfig { + session: SessionConfig { keys: initial_authorities.iter().map(|x| { (x.0.clone(), x.0.clone(), session_keys( x.2.clone(), @@ -283,7 +283,7 @@ pub fn testnet_genesis( )) }).collect::>(), }, - pallet_staking: StakingConfig { + staking: StakingConfig { validator_count: initial_authorities.len() as u32, minimum_validator_count: initial_authorities.len() as u32, invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect(), @@ -291,41 +291,41 @@ pub fn testnet_genesis( stakers, .. 
Default::default() }, - pallet_democracy: DemocracyConfig::default(), - pallet_elections_phragmen: ElectionsConfig { + democracy: DemocracyConfig::default(), + elections: ElectionsConfig { members: endowed_accounts.iter() .take((num_endowed_accounts + 1) / 2) .cloned() .map(|member| (member, STASH)) .collect(), }, - pallet_collective_Instance1: CouncilConfig::default(), - pallet_collective_Instance2: TechnicalCommitteeConfig { + council: CouncilConfig::default(), + technical_committee: TechnicalCommitteeConfig { members: endowed_accounts.iter() .take((num_endowed_accounts + 1) / 2) .cloned() .collect(), phantom: Default::default(), }, - pallet_sudo: SudoConfig { + sudo: SudoConfig { key: root_key, }, - pallet_babe: BabeConfig { + babe: BabeConfig { authorities: vec![], epoch_config: Some(node_runtime::BABE_GENESIS_EPOCH_CONFIG), }, - pallet_im_online: ImOnlineConfig { + im_online: ImOnlineConfig { keys: vec![], }, - pallet_authority_discovery: AuthorityDiscoveryConfig { + authority_discovery: AuthorityDiscoveryConfig { keys: vec![], }, - pallet_grandpa: GrandpaConfig { + grandpa: GrandpaConfig { authorities: vec![], }, - pallet_membership_Instance1: Default::default(), - pallet_treasury: Default::default(), - pallet_society: SocietyConfig { + technical_membership: Default::default(), + treasury: Default::default(), + society: SocietyConfig { members: endowed_accounts.iter() .take((num_endowed_accounts + 1) / 2) .cloned() @@ -333,9 +333,9 @@ pub fn testnet_genesis( pot: 0, max_members: 999, }, - pallet_vesting: Default::default(), - pallet_gilt: Default::default(), - pallet_transaction_storage: Default::default(), + vesting: Default::default(), + gilt: Default::default(), + transaction_storage: Default::default(), } } diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index fe3ae5f14cc37..8c7b1eae5dec1 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -347,14 +347,14 @@ fn 
full_native_block_import_works() { let events = vec![ EventRecord { phase: Phase::ApplyExtrinsic(0), - event: Event::frame_system(frame_system::Event::ExtrinsicSuccess( + event: Event::System(frame_system::Event::ExtrinsicSuccess( DispatchInfo { weight: timestamp_weight, class: DispatchClass::Mandatory, ..Default::default() } )), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::pallet_balances(pallet_balances::Event::Transfer( + event: Event::Balances(pallet_balances::Event::Transfer( alice().into(), bob().into(), 69 * DOLLARS, @@ -363,12 +363,12 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::pallet_treasury(pallet_treasury::RawEvent::Deposit(fees * 8 / 10)), + event: Event::Treasury(pallet_treasury::RawEvent::Deposit(fees * 8 / 10)), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::frame_system(frame_system::Event::ExtrinsicSuccess( + event: Event::System(frame_system::Event::ExtrinsicSuccess( DispatchInfo { weight: transfer_weight, ..Default::default() } )), topics: vec![], @@ -399,14 +399,14 @@ fn full_native_block_import_works() { let events = vec![ EventRecord { phase: Phase::ApplyExtrinsic(0), - event: Event::frame_system(frame_system::Event::ExtrinsicSuccess( + event: Event::System(frame_system::Event::ExtrinsicSuccess( DispatchInfo { weight: timestamp_weight, class: DispatchClass::Mandatory, ..Default::default() } )), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::pallet_balances( + event: Event::Balances( pallet_balances::Event::Transfer( bob().into(), alice().into(), @@ -417,19 +417,19 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::pallet_treasury(pallet_treasury::RawEvent::Deposit(fees * 8 / 10)), + event: Event::Treasury(pallet_treasury::RawEvent::Deposit(fees * 8 / 10)), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), 
- event: Event::frame_system(frame_system::Event::ExtrinsicSuccess( + event: Event::System(frame_system::Event::ExtrinsicSuccess( DispatchInfo { weight: transfer_weight, ..Default::default() } )), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(2), - event: Event::pallet_balances( + event: Event::Balances( pallet_balances::Event::Transfer( alice().into(), bob().into(), @@ -440,12 +440,12 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(2), - event: Event::pallet_treasury(pallet_treasury::RawEvent::Deposit(fees * 8 / 10)), + event: Event::Treasury(pallet_treasury::RawEvent::Deposit(fees * 8 / 10)), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(2), - event: Event::frame_system(frame_system::Event::ExtrinsicSuccess( + event: Event::System(frame_system::Event::ExtrinsicSuccess( DispatchInfo { weight: transfer_weight, ..Default::default() } )), topics: vec![], diff --git a/bin/node/testing/src/genesis.rs b/bin/node/testing/src/genesis.rs index 6f884d1f73b62..3a6d51f1971ed 100644 --- a/bin/node/testing/src/genesis.rs +++ b/bin/node/testing/src/genesis.rs @@ -56,20 +56,20 @@ pub fn config_endowed( ); GenesisConfig { - frame_system: SystemConfig { + system: SystemConfig { changes_trie_config: if support_changes_trie { Some(ChangesTrieConfiguration { digest_interval: 2, digest_levels: 2, }) } else { None }, code: code.map(|x| x.to_vec()).unwrap_or_else(|| wasm_binary_unwrap().to_vec()), }, - pallet_indices: IndicesConfig { + indices: IndicesConfig { indices: vec![], }, - pallet_balances: BalancesConfig { + balances: BalancesConfig { balances: endowed, }, - pallet_session: SessionConfig { + session: SessionConfig { keys: vec![ (dave(), alice(), to_session_keys( &Ed25519Keyring::Alice, @@ -85,7 +85,7 @@ pub fn config_endowed( )), ] }, - pallet_staking: StakingConfig { + staking: StakingConfig { stakers: vec![ (dave(), alice(), 111 * DOLLARS, StakerStatus::Validator), (eve(), bob(), 100 * DOLLARS, 
StakerStatus::Validator), @@ -97,29 +97,29 @@ pub fn config_endowed( invulnerables: vec![alice(), bob(), charlie()], .. Default::default() }, - pallet_babe: BabeConfig { + babe: BabeConfig { authorities: vec![], epoch_config: Some(BABE_GENESIS_EPOCH_CONFIG), }, - pallet_grandpa: GrandpaConfig { + grandpa: GrandpaConfig { authorities: vec![], }, - pallet_im_online: Default::default(), - pallet_authority_discovery: Default::default(), - pallet_democracy: Default::default(), - pallet_collective_Instance1: Default::default(), - pallet_collective_Instance2: Default::default(), - pallet_membership_Instance1: Default::default(), - pallet_elections_phragmen: Default::default(), - pallet_sudo: Default::default(), - pallet_treasury: Default::default(), - pallet_society: SocietyConfig { + im_online: Default::default(), + authority_discovery: Default::default(), + democracy: Default::default(), + council: Default::default(), + technical_committee: Default::default(), + technical_membership: Default::default(), + elections: Default::default(), + sudo: Default::default(), + treasury: Default::default(), + society: SocietyConfig { members: vec![alice(), bob()], pot: 0, max_members: 999, }, - pallet_vesting: Default::default(), - pallet_gilt: Default::default(), - pallet_transaction_storage: Default::default(), + vesting: Default::default(), + gilt: Default::default(), + transaction_storage: Default::default(), } } diff --git a/frame/assets/src/tests.rs b/frame/assets/src/tests.rs index 3ee8f9a9cfa47..6bef5b962de74 100644 --- a/frame/assets/src/tests.rs +++ b/frame/assets/src/tests.rs @@ -397,7 +397,7 @@ fn transferring_less_than_one_unit_is_fine() { assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 0)); - System::assert_last_event(mock::Event::pallet_assets(crate::Event::Transferred(0, 1, 2, 0))); + System::assert_last_event(mock::Event::Assets(crate::Event::Transferred(0, 1, 2, 
0))); }); } diff --git a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs index 43d3c2fc6009e..86004efcf68f6 100644 --- a/frame/balances/src/tests.rs +++ b/frame/balances/src/tests.rs @@ -464,7 +464,7 @@ macro_rules! decl_tests { assert_ok!(Balances::reserve(&1, 110)); assert_ok!(Balances::repatriate_reserved(&1, &2, 41, Status::Free), 0); System::assert_last_event( - Event::pallet_balances(crate::Event::ReserveRepatriated(1, 2, 41, Status::Free)) + Event::Balances(crate::Event::ReserveRepatriated(1, 2, 41, Status::Free)) ); assert_eq!(Balances::reserved_balance(1), 69); assert_eq!(Balances::free_balance(1), 0); @@ -683,18 +683,18 @@ macro_rules! decl_tests { System::set_block_number(2); assert_ok!(Balances::reserve(&1, 10)); - System::assert_last_event(Event::pallet_balances(crate::Event::Reserved(1, 10))); + System::assert_last_event(Event::Balances(crate::Event::Reserved(1, 10))); System::set_block_number(3); assert!(Balances::unreserve(&1, 5).is_zero()); - System::assert_last_event(Event::pallet_balances(crate::Event::Unreserved(1, 5))); + System::assert_last_event(Event::Balances(crate::Event::Unreserved(1, 5))); System::set_block_number(4); assert_eq!(Balances::unreserve(&1, 6), 1); // should only unreserve 5 - System::assert_last_event(Event::pallet_balances(crate::Event::Unreserved(1, 5))); + System::assert_last_event(Event::Balances(crate::Event::Unreserved(1, 5))); }); } @@ -709,9 +709,9 @@ macro_rules! decl_tests { assert_eq!( events(), [ - Event::frame_system(system::Event::NewAccount(1)), - Event::pallet_balances(crate::Event::Endowed(1, 100)), - Event::pallet_balances(crate::Event::BalanceSet(1, 100, 0)), + Event::System(system::Event::NewAccount(1)), + Event::Balances(crate::Event::Endowed(1, 100)), + Event::Balances(crate::Event::BalanceSet(1, 100, 0)), ] ); @@ -721,8 +721,8 @@ macro_rules! 
decl_tests { assert_eq!( events(), [ - Event::frame_system(system::Event::KilledAccount(1)), - Event::pallet_balances(crate::Event::DustLost(1, 99)), + Event::System(system::Event::KilledAccount(1)), + Event::Balances(crate::Event::DustLost(1, 99)), ] ); }); @@ -739,9 +739,9 @@ macro_rules! decl_tests { assert_eq!( events(), [ - Event::frame_system(system::Event::NewAccount(1)), - Event::pallet_balances(crate::Event::Endowed(1, 100)), - Event::pallet_balances(crate::Event::BalanceSet(1, 100, 0)), + Event::System(system::Event::NewAccount(1)), + Event::Balances(crate::Event::Endowed(1, 100)), + Event::Balances(crate::Event::BalanceSet(1, 100, 0)), ] ); @@ -751,7 +751,7 @@ macro_rules! decl_tests { assert_eq!( events(), [ - Event::frame_system(system::Event::KilledAccount(1)) + Event::System(system::Event::KilledAccount(1)) ] ); }); diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs index afa68764573e0..e6de7e64b16a2 100644 --- a/frame/balances/src/tests_local.rs +++ b/frame/balances/src/tests_local.rs @@ -172,9 +172,9 @@ fn emit_events_with_no_existential_deposit_suicide_with_dust() { assert_eq!( events(), [ - Event::frame_system(system::Event::NewAccount(1)), - Event::pallet_balances(crate::Event::Endowed(1, 100)), - Event::pallet_balances(crate::Event::BalanceSet(1, 100, 0)), + Event::System(system::Event::NewAccount(1)), + Event::Balances(crate::Event::Endowed(1, 100)), + Event::Balances(crate::Event::BalanceSet(1, 100, 0)), ] ); @@ -190,8 +190,8 @@ fn emit_events_with_no_existential_deposit_suicide_with_dust() { assert_eq!( events(), [ - Event::frame_system(system::Event::KilledAccount(1)), - Event::pallet_balances(crate::Event::DustLost(1, 1)), + Event::System(system::Event::KilledAccount(1)), + Event::Balances(crate::Event::DustLost(1, 1)), ] ); }); diff --git a/frame/balances/src/tests_reentrancy.rs b/frame/balances/src/tests_reentrancy.rs index a12da8f001d80..caca7d78d0ff5 100644 --- a/frame/balances/src/tests_reentrancy.rs 
+++ b/frame/balances/src/tests_reentrancy.rs @@ -188,8 +188,8 @@ fn transfer_dust_removal_tst1_should_work() { // Number of events expected is 8 assert_eq!(System::events().len(), 11); - System::assert_has_event(Event::pallet_balances(crate::Event::Transfer(2, 3, 450))); - System::assert_has_event(Event::pallet_balances(crate::Event::DustLost(2, 50))); + System::assert_has_event(Event::Balances(crate::Event::Transfer(2, 3, 450))); + System::assert_has_event(Event::Balances(crate::Event::DustLost(2, 50))); } ); } @@ -220,8 +220,8 @@ fn transfer_dust_removal_tst2_should_work() { // Number of events expected is 8 assert_eq!(System::events().len(), 9); - System::assert_has_event(Event::pallet_balances(crate::Event::Transfer(2, 1, 450))); - System::assert_has_event(Event::pallet_balances(crate::Event::DustLost(2, 50))); + System::assert_has_event(Event::Balances(crate::Event::Transfer(2, 1, 450))); + System::assert_has_event(Event::Balances(crate::Event::DustLost(2, 50))); } ); } @@ -261,11 +261,11 @@ fn repatriating_reserved_balance_dust_removal_should_work() { // Number of events expected is 10 assert_eq!(System::events().len(), 10); - System::assert_has_event(Event::pallet_balances( + System::assert_has_event(Event::Balances( crate::Event::ReserveRepatriated(2, 1, 450, Status::Free), )); - System::assert_last_event(Event::pallet_balances(crate::Event::DustLost(2, 50))); + System::assert_last_event(Event::Balances(crate::Event::DustLost(2, 50))); } ); } diff --git a/frame/bounties/src/tests.rs b/frame/bounties/src/tests.rs index 04cc06ef64b8d..3a53ffd56ac1a 100644 --- a/frame/bounties/src/tests.rs +++ b/frame/bounties/src/tests.rs @@ -161,7 +161,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { fn last_event() -> RawEvent { System::events().into_iter().map(|r| r.event) .filter_map(|e| { - if let Event::pallet_bounties(inner) = e { Some(inner) } else { None } + if let Event::Bounties(inner) = e { Some(inner) } else { None } }) .last() .unwrap() diff --git 
a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index 76e4106978233..a7039887db606 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -1059,15 +1059,15 @@ mod tests { pub fn new_test_ext() -> sp_io::TestExternalities { let mut ext: sp_io::TestExternalities = GenesisConfig { - collective_Instance1: collective::GenesisConfig { + collective: collective::GenesisConfig { members: vec![1, 2, 3], phantom: Default::default(), }, - collective_Instance2: collective::GenesisConfig { + collective_majority: collective::GenesisConfig { members: vec![1, 2, 3, 4, 5], phantom: Default::default(), }, - collective: Default::default(), + default_collective: Default::default(), }.build_storage().unwrap().into(); ext.execute_with(|| System::set_block_number(1)); ext @@ -1107,10 +1107,10 @@ mod tests { let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; assert_eq!(System::events(), vec![ - record(Event::collective_Instance1(RawEvent::Proposed(1, 0, hash.clone(), 3))), - record(Event::collective_Instance1(RawEvent::Voted(2, hash.clone(), true, 2, 0))), - record(Event::collective_Instance1(RawEvent::Closed(hash.clone(), 2, 1))), - record(Event::collective_Instance1(RawEvent::Disapproved(hash.clone()))) + record(Event::Collective(RawEvent::Proposed(1, 0, hash.clone(), 3))), + record(Event::Collective(RawEvent::Voted(2, hash.clone(), true, 2, 0))), + record(Event::Collective(RawEvent::Closed(hash.clone(), 2, 1))), + record(Event::Collective(RawEvent::Disapproved(hash.clone()))) ]); }); } @@ -1169,10 +1169,10 @@ mod tests { let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; assert_eq!(System::events(), vec![ - record(Event::collective_Instance1(RawEvent::Proposed(1, 0, hash.clone(), 3))), - record(Event::collective_Instance1(RawEvent::Voted(2, hash.clone(), true, 2, 0))), - record(Event::collective_Instance1(RawEvent::Closed(hash.clone(), 2, 1))), - 
record(Event::collective_Instance1(RawEvent::Disapproved(hash.clone()))) + record(Event::Collective(RawEvent::Proposed(1, 0, hash.clone(), 3))), + record(Event::Collective(RawEvent::Voted(2, hash.clone(), true, 2, 0))), + record(Event::Collective(RawEvent::Closed(hash.clone(), 2, 1))), + record(Event::Collective(RawEvent::Disapproved(hash.clone()))) ]); }); } @@ -1194,11 +1194,11 @@ mod tests { let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; assert_eq!(System::events(), vec![ - record(Event::collective_Instance1(RawEvent::Proposed(1, 0, hash.clone(), 3))), - record(Event::collective_Instance1(RawEvent::Voted(2, hash.clone(), true, 2, 0))), - record(Event::collective_Instance1(RawEvent::Closed(hash.clone(), 3, 0))), - record(Event::collective_Instance1(RawEvent::Approved(hash.clone()))), - record(Event::collective_Instance1(RawEvent::Executed(hash.clone(), Err(DispatchError::BadOrigin)))) + record(Event::Collective(RawEvent::Proposed(1, 0, hash.clone(), 3))), + record(Event::Collective(RawEvent::Voted(2, hash.clone(), true, 2, 0))), + record(Event::Collective(RawEvent::Closed(hash.clone(), 3, 0))), + record(Event::Collective(RawEvent::Approved(hash.clone()))), + record(Event::Collective(RawEvent::Executed(hash.clone(), Err(DispatchError::BadOrigin)))) ]); }); } @@ -1221,12 +1221,12 @@ mod tests { let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; assert_eq!(System::events(), vec![ - record(Event::collective_Instance2(RawEvent::Proposed(1, 0, hash.clone(), 5))), - record(Event::collective_Instance2(RawEvent::Voted(2, hash.clone(), true, 2, 0))), - record(Event::collective_Instance2(RawEvent::Voted(3, hash.clone(), true, 3, 0))), - record(Event::collective_Instance2(RawEvent::Closed(hash.clone(), 5, 0))), - record(Event::collective_Instance2(RawEvent::Approved(hash.clone()))), - record(Event::collective_Instance2(RawEvent::Executed(hash.clone(), Err(DispatchError::BadOrigin)))) + 
record(Event::CollectiveMajority(RawEvent::Proposed(1, 0, hash.clone(), 5))), + record(Event::CollectiveMajority(RawEvent::Voted(2, hash.clone(), true, 2, 0))), + record(Event::CollectiveMajority(RawEvent::Voted(3, hash.clone(), true, 3, 0))), + record(Event::CollectiveMajority(RawEvent::Closed(hash.clone(), 5, 0))), + record(Event::CollectiveMajority(RawEvent::Approved(hash.clone()))), + record(Event::CollectiveMajority(RawEvent::Executed(hash.clone(), Err(DispatchError::BadOrigin)))) ]); }); } @@ -1321,7 +1321,7 @@ mod tests { assert_eq!(System::events(), vec![ EventRecord { phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Proposed( + event: Event::Collective(RawEvent::Proposed( 1, 0, hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), @@ -1449,7 +1449,7 @@ mod tests { assert_eq!(System::events(), vec![ EventRecord { phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Proposed( + event: Event::Collective(RawEvent::Proposed( 1, 0, hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), @@ -1459,7 +1459,7 @@ mod tests { }, EventRecord { phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Voted( + event: Event::Collective(RawEvent::Voted( 1, hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), false, @@ -1592,7 +1592,7 @@ mod tests { assert_eq!(System::events(), vec![ EventRecord { phase: Phase::Initialization, - event: Event::collective_Instance1( + event: Event::Collective( RawEvent::Proposed( 1, 0, @@ -1603,7 +1603,7 @@ mod tests { }, EventRecord { phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Voted( + event: Event::Collective(RawEvent::Voted( 2, hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), false, @@ -1614,14 +1614,14 @@ mod tests { }, EventRecord { phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Closed( + 
event: Event::Collective(RawEvent::Closed( hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), 1, 1, )), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Disapproved( + event: Event::Collective(RawEvent::Disapproved( hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), )), topics: vec![], @@ -1644,7 +1644,7 @@ mod tests { assert_eq!(System::events(), vec![ EventRecord { phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Proposed( + event: Event::Collective(RawEvent::Proposed( 1, 0, hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), @@ -1654,7 +1654,7 @@ mod tests { }, EventRecord { phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Voted( + event: Event::Collective(RawEvent::Voted( 2, hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), true, @@ -1665,21 +1665,21 @@ mod tests { }, EventRecord { phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Closed( + event: Event::Collective(RawEvent::Closed( hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), 2, 0, )), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Approved( + event: Event::Collective(RawEvent::Approved( hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), )), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Executed( + event: Event::Collective(RawEvent::Executed( hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), Err(DispatchError::BadOrigin), )), @@ -1731,9 +1731,9 @@ mod tests { assert_ok!(Collective::disapprove_proposal(Origin::root(), hash.clone())); let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; 
assert_eq!(System::events(), vec![ - record(Event::collective_Instance1(RawEvent::Proposed(1, 0, hash.clone(), 2))), - record(Event::collective_Instance1(RawEvent::Voted(2, hash.clone(), true, 2, 0))), - record(Event::collective_Instance1(RawEvent::Disapproved(hash.clone()))), + record(Event::Collective(RawEvent::Proposed(1, 0, hash.clone(), 2))), + record(Event::Collective(RawEvent::Voted(2, hash.clone(), true, 2, 0))), + record(Event::Collective(RawEvent::Disapproved(hash.clone()))), ]); }) } diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index f3a981347c981..3739ab77e2b6c 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -1357,7 +1357,7 @@ mod tests { >::events() .into_iter() .filter_map(|meta| match meta.event { - MetaEvent::pallet_contracts(contract_event) => Some(contract_event), + MetaEvent::Contracts(contract_event) => Some(contract_event), _ => None, }) .collect() diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 75ea8d9bd89b6..e066a369af0be 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -481,50 +481,50 @@ fn instantiate_and_call_and_deposit_event() { assert_eq!(System::events(), vec![ EventRecord { phase: Phase::Initialization, - event: Event::frame_system(frame_system::Event::NewAccount(ALICE.clone())), + event: Event::System(frame_system::Event::NewAccount(ALICE.clone())), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_balances( + event: Event::Balances( pallet_balances::Event::Endowed(ALICE, 1_000_000) ), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::frame_system(frame_system::Event::NewAccount(addr.clone())), + event: Event::System(frame_system::Event::NewAccount(addr.clone())), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_balances( + event: Event::Balances( pallet_balances::Event::Endowed(addr.clone(), subsistence * 100) ), 
topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_balances( + event: Event::Balances( pallet_balances::Event::Transfer(ALICE, addr.clone(), subsistence * 100) ), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_contracts(crate::Event::CodeStored(code_hash.into())), + event: Event::Contracts(crate::Event::CodeStored(code_hash.into())), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_contracts( + event: Event::Contracts( crate::Event::ContractEmitted(addr.clone(), vec![1, 2, 3, 4]) ), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_contracts(crate::Event::Instantiated(ALICE, addr.clone())), + event: Event::Contracts(crate::Event::Instantiated(ALICE, addr.clone())), topics: vec![], }, ]); @@ -1210,45 +1210,45 @@ fn restoration( let mut events = vec![ EventRecord { phase: Phase::Initialization, - event: Event::frame_system(frame_system::Event::NewAccount(ALICE)), + event: Event::System(frame_system::Event::NewAccount(ALICE)), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_balances( + event: Event::Balances( pallet_balances::Event::Endowed(ALICE, 1_000_000) ), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::frame_system(frame_system::Event::NewAccount(addr_bob.clone())), + event: Event::System(frame_system::Event::NewAccount(addr_bob.clone())), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_balances( + event: Event::Balances( pallet_balances::Event::Endowed(addr_bob.clone(), 30_000) ), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_balances( + event: Event::Balances( pallet_balances::Event::Transfer(ALICE, addr_bob.clone(), 30_000) ), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_contracts( + event: Event::Contracts( 
crate::Event::CodeStored(set_rent_code_hash.into()) ), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_contracts( + event: Event::Contracts( crate::Event::Instantiated(ALICE, addr_bob.clone()) ), topics: vec![], @@ -1271,26 +1271,26 @@ fn restoration( events.extend([ EventRecord { phase: Phase::Initialization, - event: Event::frame_system(frame_system::Event::NewAccount(addr_dummy.clone())), + event: Event::System(frame_system::Event::NewAccount(addr_dummy.clone())), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_balances( + event: Event::Balances( pallet_balances::Event::Endowed(addr_dummy.clone(), 20_000) ), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_balances( + event: Event::Balances( pallet_balances::Event::Transfer(ALICE, addr_dummy.clone(), 20_000) ), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_contracts( + event: Event::Contracts( crate::Event::Instantiated(ALICE, addr_dummy.clone()) ), topics: vec![], @@ -1418,46 +1418,46 @@ fn restoration( assert_eq!(System::events(), vec![ EventRecord { phase: Phase::Initialization, - event: Event::pallet_contracts(crate::Event::Evicted(addr_bob)), + event: Event::Contracts(crate::Event::Evicted(addr_bob)), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::frame_system(frame_system::Event::NewAccount(CHARLIE)), + event: Event::System(frame_system::Event::NewAccount(CHARLIE)), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_balances(pallet_balances::Event::Endowed(CHARLIE, 1_000_000)), + event: Event::Balances(pallet_balances::Event::Endowed(CHARLIE, 1_000_000)), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::frame_system(frame_system::Event::NewAccount(addr_django.clone())), + event: Event::System(frame_system::Event::NewAccount(addr_django.clone())), topics: 
vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_balances(pallet_balances::Event::Endowed(addr_django.clone(), 30_000)), + event: Event::Balances(pallet_balances::Event::Endowed(addr_django.clone(), 30_000)), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_balances( + event: Event::Balances( pallet_balances::Event::Transfer(CHARLIE, addr_django.clone(), 30_000) ), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_contracts( + event: Event::Contracts( crate::Event::CodeStored(restoration_code_hash) ), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_contracts( + event: Event::Contracts( crate::Event::Instantiated(CHARLIE, addr_django.clone()) ), topics: vec![], @@ -1491,17 +1491,17 @@ fn restoration( assert_eq!(System::events(), vec![ EventRecord { phase: Phase::Initialization, - event: Event::pallet_contracts(crate::Event::CodeRemoved(restoration_code_hash)), + event: Event::Contracts(crate::Event::CodeRemoved(restoration_code_hash)), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::frame_system(system::Event::KilledAccount(addr_django.clone())), + event: Event::System(system::Event::KilledAccount(addr_django.clone())), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_contracts( + event: Event::Contracts( crate::Event::Restored( addr_django, addr_bob, bob_contract.code_hash, 50 ) @@ -1729,26 +1729,26 @@ fn self_destruct_works() { pretty_assertions::assert_eq!(System::events(), vec![ EventRecord { phase: Phase::Initialization, - event: Event::frame_system( + event: Event::System( frame_system::Event::KilledAccount(addr.clone()) ), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_balances( + event: Event::Balances( pallet_balances::Event::Transfer(addr.clone(), DJANGO, 93_086) ), topics: vec![], }, EventRecord { phase: 
Phase::Initialization, - event: Event::pallet_contracts(crate::Event::CodeRemoved(code_hash)), + event: Event::Contracts(crate::Event::CodeRemoved(code_hash)), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_contracts( + event: Event::Contracts( crate::Event::Terminated(addr.clone(), DJANGO) ), topics: vec![], diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index 830df099b5d08..bd035aaf82969 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -75,7 +75,7 @@ pub(crate) fn multi_phase_events() -> Vec> { System::events() .into_iter() .map(|r| r.event) - .filter_map(|e| if let Event::multi_phase(inner) = e { Some(inner) } else { None }) + .filter_map(|e| if let Event::MultiPhase(inner) = e { Some(inner) } else { None }) .collect::>() } diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index ab2edfaac6c29..556c57eea5a10 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -1308,7 +1308,7 @@ mod tests { pub fn build_and_execute(self, test: impl FnOnce() -> ()) { MEMBERS.with(|m| *m.borrow_mut() = self.genesis_members.iter().map(|(m, _)| m.clone()).collect::>()); let mut ext: sp_io::TestExternalities = GenesisConfig { - pallet_balances: pallet_balances::GenesisConfig::{ + balances: pallet_balances::GenesisConfig::{ balances: vec![ (1, 10 * self.balance_factor), (2, 20 * self.balance_factor), @@ -1318,7 +1318,7 @@ mod tests { (6, 60 * self.balance_factor) ], }, - elections_phragmen: elections_phragmen::GenesisConfig:: { + elections: elections_phragmen::GenesisConfig:: { members: self.genesis_members }, }.build_storage().unwrap().into(); @@ -2134,7 +2134,7 @@ mod tests { System::set_block_number(5); Elections::on_initialize(System::block_number()); - System::assert_last_event(Event::elections_phragmen(super::Event::EmptyTerm)); + 
System::assert_last_event(Event::Elections(super::Event::EmptyTerm)); }) } @@ -2150,7 +2150,7 @@ mod tests { System::set_block_number(5); Elections::on_initialize(System::block_number()); - System::assert_last_event(Event::elections_phragmen(super::Event::NewTerm(vec![(4, 40), (5, 50)]))); + System::assert_last_event(Event::Elections(super::Event::NewTerm(vec![(4, 40), (5, 50)]))); assert_eq!(members_and_stake(), vec![(4, 40), (5, 50)]); assert_eq!(runners_up_and_stake(), vec![]); @@ -2161,7 +2161,7 @@ mod tests { System::set_block_number(10); Elections::on_initialize(System::block_number()); - System::assert_last_event(Event::elections_phragmen(super::Event::NewTerm(vec![]))); + System::assert_last_event(Event::Elections(super::Event::NewTerm(vec![]))); // outgoing have lost their bond. assert_eq!(balances(&4), (37, 0)); @@ -2231,7 +2231,7 @@ mod tests { assert_eq!(Elections::election_rounds(), 1); assert!(members_ids().is_empty()); - System::assert_last_event(Event::elections_phragmen(super::Event::NewTerm(vec![]))); + System::assert_last_event(Event::Elections(super::Event::NewTerm(vec![]))); }); } @@ -2589,7 +2589,7 @@ mod tests { // 5 is an outgoing loser. will also get slashed. 
assert_eq!(balances(&5), (45, 2)); - System::assert_has_event(Event::elections_phragmen(super::Event::NewTerm(vec![(4, 40), (5, 50)]))); + System::assert_has_event(Event::Elections(super::Event::NewTerm(vec![(4, 40), (5, 50)]))); }) } diff --git a/frame/elections/src/mock.rs b/frame/elections/src/mock.rs index b5dd15ce8119b..bb67622eb7ea1 100644 --- a/frame/elections/src/mock.rs +++ b/frame/elections/src/mock.rs @@ -197,7 +197,7 @@ impl ExtBuilder { PRESENT_SLASH_PER_VOTER.with(|v| *v.borrow_mut() = self.bad_presentation_punishment); DECAY_RATIO.with(|v| *v.borrow_mut() = self.decay_ratio); let mut ext: sp_io::TestExternalities = GenesisConfig { - pallet_balances: pallet_balances::GenesisConfig::{ + balances: pallet_balances::GenesisConfig::{ balances: vec![ (1, 10 * self.balance_factor), (2, 20 * self.balance_factor), diff --git a/frame/example/src/tests.rs b/frame/example/src/tests.rs index f4658c2807647..a290ea0f6576f 100644 --- a/frame/example/src/tests.rs +++ b/frame/example/src/tests.rs @@ -107,9 +107,9 @@ impl Config for Test { pub fn new_test_ext() -> sp_io::TestExternalities { let t = GenesisConfig { // We use default for brevity, but you can configure as desired if needed. - frame_system: Default::default(), - pallet_balances: Default::default(), - pallet_example: pallet_example::GenesisConfig { + system: Default::default(), + balances: Default::default(), + example: pallet_example::GenesisConfig { dummy: 42, // we configure the map with (key, value) pairs. 
bar: vec![(1, 2), (2, 3)], diff --git a/frame/offences/src/tests.rs b/frame/offences/src/tests.rs index f7bd90fe93e62..edc22cb239c44 100644 --- a/frame/offences/src/tests.rs +++ b/frame/offences/src/tests.rs @@ -131,7 +131,7 @@ fn should_deposit_event() { System::events(), vec![EventRecord { phase: Phase::Initialization, - event: Event::offences(crate::Event::Offence(KIND, time_slot.encode())), + event: Event::Offences(crate::Event::Offence(KIND, time_slot.encode())), topics: vec![], }] ); @@ -166,7 +166,7 @@ fn doesnt_deposit_event_for_dups() { System::events(), vec![EventRecord { phase: Phase::Initialization, - event: Event::offences(crate::Event::Offence(KIND, time_slot.encode())), + event: Event::Offences(crate::Event::Offence(KIND, time_slot.encode())), topics: vec![], }] ); diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index 8930a6bfd61c8..b4ff35d0d6f90 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -794,7 +794,7 @@ macro_rules! assert_session_era { pub(crate) fn staking_events() -> Vec> { System::events().into_iter().map(|r| r.event).filter_map(|e| { - if let Event::staking(inner) = e { + if let Event::Staking(inner) = e { Some(inner) } else { None diff --git a/frame/sudo/src/tests.rs b/frame/sudo/src/tests.rs index 2f824ae6a3946..aa859c547c039 100644 --- a/frame/sudo/src/tests.rs +++ b/frame/sudo/src/tests.rs @@ -58,7 +58,7 @@ fn sudo_emits_events_correctly() { // Should emit event to indicate success when called with the root `key` and `call` is `Ok`. let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log(42, 1))); assert_ok!(Sudo::sudo(Origin::signed(1), call)); - System::assert_has_event(TestEvent::sudo(Event::Sudid(Ok(())))); + System::assert_has_event(TestEvent::Sudo(Event::Sudid(Ok(())))); }) } @@ -96,7 +96,7 @@ fn sudo_unchecked_weight_emits_events_correctly() { // Should emit event to indicate success when called with the root `key` and `call` is `Ok`. 
let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log(42, 1))); assert_ok!(Sudo::sudo_unchecked_weight(Origin::signed(1), call, 1_000)); - System::assert_has_event(TestEvent::sudo(Event::Sudid(Ok(())))); + System::assert_has_event(TestEvent::Sudo(Event::Sudid(Ok(())))); }) } @@ -122,10 +122,10 @@ fn set_key_emits_events_correctly() { // A root `key` can change the root `key`. assert_ok!(Sudo::set_key(Origin::signed(1), 2)); - System::assert_has_event(TestEvent::sudo(Event::KeyChanged(1))); + System::assert_has_event(TestEvent::Sudo(Event::KeyChanged(1))); // Double check. assert_ok!(Sudo::set_key(Origin::signed(2), 4)); - System::assert_has_event(TestEvent::sudo(Event::KeyChanged(2))); + System::assert_has_event(TestEvent::Sudo(Event::KeyChanged(2))); }); } @@ -160,6 +160,6 @@ fn sudo_as_emits_events_correctly() { // A non-privileged function will work when passed to `sudo_as` with the root `key`. let call = Box::new(Call::Logger(LoggerCall::non_privileged_log(42, 1))); assert_ok!(Sudo::sudo_as(Origin::signed(1), 2, call)); - System::assert_has_event(TestEvent::sudo(Event::SudoAsDone(Ok(())))); + System::assert_has_event(TestEvent::Sudo(Event::SudoAsDone(Ok(())))); }); } diff --git a/frame/support/procedural/src/construct_runtime/expand/config.rs b/frame/support/procedural/src/construct_runtime/expand/config.rs index 93d4a868b7847..0400bd52f433a 100644 --- a/frame/support/procedural/src/construct_runtime/expand/config.rs +++ b/frame/support/procedural/src/construct_runtime/expand/config.rs @@ -16,6 +16,7 @@ // limitations under the License use crate::construct_runtime::Pallet; +use inflector::Inflector; use proc_macro2::TokenStream; use quote::{format_ident, quote}; use syn::Ident; @@ -32,12 +33,8 @@ pub fn expand_outer_config( for decl in pallet_decls { if let Some(pallet_entry) = decl.find_part("Config") { let config = format_ident!("{}Config", decl.name); - let mod_name = decl.pallet.mod_name(); - let field_name = if let Some(inst) = 
decl.instance.as_ref() { - format_ident!("{}_{}", mod_name, inst) - } else { - mod_name - }; + let pallet_name = &decl.name.to_string().to_snake_case(); + let field_name = &Ident::new(pallet_name, decl.name.span()); let part_is_generic = !pallet_entry.generics.params.is_empty(); types.extend(expand_config_types(runtime, decl, &config, part_is_generic)); @@ -56,7 +53,6 @@ pub fn expand_outer_config( #[serde(rename_all = "camelCase")] #[serde(deny_unknown_fields)] #[serde(crate = "__genesis_config_serde_import__")] - #[allow(non_snake_case)] pub struct GenesisConfig { #fields } @@ -85,7 +81,7 @@ fn expand_config_types( config: &Ident, part_is_generic: bool, ) -> TokenStream { - let path = &decl.pallet; + let path = &decl.path; match (decl.instance.as_ref(), part_is_generic) { (Some(inst), true) => quote!{ @@ -109,7 +105,7 @@ fn expand_config_build_storage_call( decl: &Pallet, field_name: &Ident, ) -> TokenStream { - let path = &decl.pallet; + let path = &decl.path; let instance = if let Some(inst) = decl.instance.as_ref() { quote!(#path::#inst) } else { diff --git a/frame/support/procedural/src/construct_runtime/expand/event.rs b/frame/support/procedural/src/construct_runtime/expand/event.rs index c2c905e50ff8d..afedb3ed92508 100644 --- a/frame/support/procedural/src/construct_runtime/expand/event.rs +++ b/frame/support/procedural/src/construct_runtime/expand/event.rs @@ -15,9 +15,9 @@ // See the License for the specific language governing permissions and // limitations under the License -use crate::construct_runtime::{Pallet, parse::PalletPath}; +use crate::construct_runtime::Pallet; use proc_macro2::TokenStream; -use quote::{format_ident, quote}; +use quote::quote; use syn::{Generics, Ident}; pub fn expand_outer_event( @@ -27,11 +27,10 @@ pub fn expand_outer_event( ) -> syn::Result { let mut event_variants = TokenStream::new(); let mut event_conversions = TokenStream::new(); - let mut events_metadata = TokenStream::new(); for pallet_decl in pallet_decls { if let 
Some(pallet_entry) = pallet_decl.find_part("Event") { - let path = &pallet_decl.pallet; + let path = &pallet_decl.path; let index = pallet_decl.index; let instance = pallet_decl.instance.as_ref(); let generics = &pallet_entry.generics; @@ -53,9 +52,8 @@ pub fn expand_outer_event( (None, false) => quote!(#path::Event), }; - event_variants.extend(expand_event_variant(runtime, path, index, instance, generics)); - event_conversions.extend(expand_event_conversion(scrate, path, instance, &pallet_event)); - events_metadata.extend(expand_event_metadata(scrate, path, &pallet_event)); + event_variants.extend(expand_event_variant(runtime, pallet_decl, index, instance, generics)); + event_conversions.extend(expand_event_conversion(scrate, pallet_decl, &pallet_event)); } } @@ -77,49 +75,42 @@ pub fn expand_outer_event( fn expand_event_variant( runtime: &Ident, - path: &PalletPath, + pallet: &Pallet, index: u8, instance: Option<&Ident>, generics: &Generics, ) -> TokenStream { + let path = &pallet.path; + let variant_name = &pallet.name; let part_is_generic = !generics.params.is_empty(); - let mod_name = &path.mod_name(); - match (instance, part_is_generic) { - (Some(inst), true) => { - let variant = format_ident!("{}_{}", mod_name, inst); - quote!(#[codec(index = #index)] #variant(#path::Event<#runtime, #path::#inst>),) + match instance { + Some(inst) if part_is_generic => { + quote!(#[codec(index = #index)] #variant_name(#path::Event<#runtime, #path::#inst>),) } - (Some(inst), false) => { - let variant = format_ident!("{}_{}", mod_name, inst); - quote!(#[codec(index = #index)] #variant(#path::Event<#path::#inst>),) + Some(inst) => { + quote!(#[codec(index = #index)] #variant_name(#path::Event<#path::#inst>),) } - (None, true) => { - quote!(#[codec(index = #index)] #mod_name(#path::Event<#runtime>),) + None if part_is_generic => { + quote!(#[codec(index = #index)] #variant_name(#path::Event<#runtime>),) } - (None, false) => { - quote!(#[codec(index = #index)] 
#mod_name(#path::Event),) + None => { + quote!(#[codec(index = #index)] #variant_name(#path::Event),) } } } fn expand_event_conversion( scrate: &TokenStream, - path: &PalletPath, - instance: Option<&Ident>, + pallet: &Pallet, pallet_event: &TokenStream, ) -> TokenStream { - let mod_name = path.mod_name(); - let variant = if let Some(inst) = instance { - format_ident!("{}_{}", mod_name, inst) - } else { - mod_name - }; + let variant_name = &pallet.name; quote!{ impl From<#pallet_event> for Event { fn from(x: #pallet_event) -> Self { - Event::#variant(x) + Event::#variant_name(x) } } impl #scrate::sp_std::convert::TryInto<#pallet_event> for Event { @@ -127,20 +118,10 @@ fn expand_event_conversion( fn try_into(self) -> #scrate::sp_std::result::Result<#pallet_event, Self::Error> { match self { - Self::#variant(evt) => Ok(evt), + Self::#variant_name(evt) => Ok(evt), _ => Err(()), } } } } } - -fn expand_event_metadata( - scrate: &TokenStream, - path: &PalletPath, - pallet_event: &TokenStream, -) -> TokenStream { - let mod_name = path.mod_name(); - - quote!{(stringify!(#mod_name), #scrate::event::FnEncode(#pallet_event::metadata)),} -} diff --git a/frame/support/procedural/src/construct_runtime/expand/metadata.rs b/frame/support/procedural/src/construct_runtime/expand/metadata.rs index cbabec73d3a6f..5854d0edccabb 100644 --- a/frame/support/procedural/src/construct_runtime/expand/metadata.rs +++ b/frame/support/procedural/src/construct_runtime/expand/metadata.rs @@ -92,7 +92,7 @@ fn expand_pallet_metadata_storage( ) -> TokenStream { if filtered_names.contains(&"Storage") { let instance = decl.instance.as_ref().into_iter(); - let path = &decl.pallet; + let path = &decl.path; quote!{ Some(#scrate::metadata::DecodeDifferent::Encode( @@ -114,7 +114,7 @@ fn expand_pallet_metadata_calls( ) -> TokenStream { if filtered_names.contains(&"Call") { let instance = decl.instance.as_ref().into_iter(); - let path = &decl.pallet; + let path = &decl.path; quote!{ 
Some(#scrate::metadata::DecodeDifferent::Encode( @@ -135,7 +135,7 @@ fn expand_pallet_metadata_events( decl: &Pallet, ) -> TokenStream { if filtered_names.contains(&"Event") { - let path = &decl.pallet; + let path = &decl.path; let part_is_generic = !decl.find_part("Event").expect("Event part exists; qed").generics.params.is_empty(); let pallet_event = match (decl.instance.as_ref(), part_is_generic) { @@ -160,7 +160,7 @@ fn expand_pallet_metadata_constants( scrate: &TokenStream, decl: &Pallet, ) -> TokenStream { - let path = &decl.pallet; + let path = &decl.path; let instance = decl.instance.as_ref().into_iter(); quote!{ @@ -177,7 +177,7 @@ fn expand_pallet_metadata_errors( scrate: &TokenStream, decl: &Pallet, ) -> TokenStream { - let path = &decl.pallet; + let path = &decl.path; let instance = decl.instance.as_ref().into_iter(); quote!{ diff --git a/frame/support/procedural/src/construct_runtime/expand/origin.rs b/frame/support/procedural/src/construct_runtime/expand/origin.rs index 021396e64caa8..2d0cc8300cb76 100644 --- a/frame/support/procedural/src/construct_runtime/expand/origin.rs +++ b/frame/support/procedural/src/construct_runtime/expand/origin.rs @@ -15,9 +15,9 @@ // See the License for the specific language governing permissions and // limitations under the License -use crate::construct_runtime::{parse::PalletPath, Pallet, SYSTEM_PALLET_NAME}; +use crate::construct_runtime::{Pallet, SYSTEM_PALLET_NAME}; use proc_macro2::TokenStream; -use quote::{format_ident, quote}; +use quote::quote; use syn::{token, Ident, Generics}; pub fn expand_outer_origin( @@ -39,7 +39,6 @@ pub fn expand_outer_origin( for pallet_decl in pallets.iter().filter(|pallet| pallet.name != SYSTEM_PALLET_NAME) { if let Some(pallet_entry) = pallet_decl.find_part("Origin") { - let path = &pallet_decl.pallet; let instance = pallet_decl.instance.as_ref(); let index = pallet_decl.index; let generics = &pallet_entry.generics; @@ -54,15 +53,15 @@ pub fn expand_outer_origin( } 
caller_variants.extend( - expand_origin_caller_variant(runtime, path, index, instance, generics), + expand_origin_caller_variant(runtime, pallet_decl, index, instance, generics), ); pallet_conversions.extend( - expand_origin_pallet_conversions(scrate, runtime, path, instance, generics), + expand_origin_pallet_conversions(scrate, runtime, pallet_decl, instance, generics), ); } } - let system_path = &system_pallet.pallet; + let system_path = &system_pallet.path; let system_index = system_pallet.index; Ok(quote!{ @@ -251,28 +250,27 @@ pub fn expand_outer_origin( fn expand_origin_caller_variant( runtime: &Ident, - path: &PalletPath, + pallet: &Pallet, index: u8, instance: Option<&Ident>, generics: &Generics, ) -> TokenStream { let part_is_generic = !generics.params.is_empty(); - let mod_name = &path.mod_name(); + let variant_name = &pallet.name; + let path = &pallet.path; - match (instance, part_is_generic) { - (Some(inst), true) => { - let variant = format_ident!("{}_{}", mod_name, inst); - quote!(#[codec(index = #index)] #variant(#path::Origin<#runtime, #path::#inst>),) + match instance { + Some(inst) if part_is_generic => { + quote!(#[codec(index = #index)] #variant_name(#path::Origin<#runtime, #path::#inst>),) } - (Some(inst), false) => { - let variant = format_ident!("{}_{}", mod_name, inst); - quote!(#[codec(index = #index)] #variant(#path::Origin<#path::#inst>),) + Some(inst) => { + quote!(#[codec(index = #index)] #variant_name(#path::Origin<#path::#inst>),) } - (None, true) => { - quote!(#[codec(index = #index)] #mod_name(#path::Origin<#runtime>),) + None if part_is_generic => { + quote!(#[codec(index = #index)] #variant_name(#path::Origin<#runtime>),) } - (None, false) => { - quote!(#[codec(index = #index)] #mod_name(#path::Origin),) + None => { + quote!(#[codec(index = #index)] #variant_name(#path::Origin),) } } } @@ -280,29 +278,25 @@ fn expand_origin_caller_variant( fn expand_origin_pallet_conversions( scrate: &TokenStream, runtime: &Ident, - path: 
&PalletPath, + pallet: &Pallet, instance: Option<&Ident>, generics: &Generics, ) -> TokenStream { - let mod_name = path.mod_name(); - let variant = if let Some(inst) = instance { - format_ident!("{}_{}", mod_name, inst) - } else { - mod_name - }; + let path = &pallet.path; + let variant_name = &pallet.name; let part_is_generic = !generics.params.is_empty(); - let pallet_origin = match (instance, part_is_generic) { - (Some(inst), true) => quote!(#path::Origin<#runtime, #path::#inst>), - (Some(inst), false) => quote!(#path::Origin<#path::#inst>), - (None, true) => quote!(#path::Origin<#runtime>), - (None, false) => quote!(#path::Origin), + let pallet_origin = match instance { + Some(inst) if part_is_generic => quote!(#path::Origin<#runtime, #path::#inst>), + Some(inst) => quote!(#path::Origin<#path::#inst>), + None if part_is_generic => quote!(#path::Origin<#runtime>), + None => quote!(#path::Origin), }; quote!{ impl From<#pallet_origin> for OriginCaller { fn from(x: #pallet_origin) -> Self { - OriginCaller::#variant(x) + OriginCaller::#variant_name(x) } } @@ -317,7 +311,7 @@ fn expand_origin_pallet_conversions( impl From for #scrate::sp_std::result::Result<#pallet_origin, Origin> { /// NOTE: converting to pallet origin loses the origin filter information. 
fn from(val: Origin) -> Self { - if let OriginCaller::#variant(l) = val.caller { + if let OriginCaller::#variant_name(l) = val.caller { Ok(l) } else { Err(val) @@ -330,7 +324,7 @@ fn expand_origin_pallet_conversions( fn try_from( x: OriginCaller, ) -> #scrate::sp_std::result::Result<#pallet_origin, OriginCaller> { - if let OriginCaller::#variant(l) = x { + if let OriginCaller::#variant_name(l) = x { Ok(l) } else { Err(x) diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index a24168c463aa7..eb3550355aa40 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -35,7 +35,7 @@ const SYSTEM_PALLET_NAME: &str = "System"; pub struct Pallet { pub name: Ident, pub index: u8, - pub pallet: PalletPath, + pub path: PalletPath, pub instance: Option, pub pallet_parts: Vec, } @@ -101,7 +101,7 @@ fn complete_pallets(decl: impl Iterator) -> syn::Resul Ok(Pallet { name: pallet.name, index: final_index, - pallet: pallet.pallet, + path: pallet.path, instance: pallet.instance, pallet_parts: pallet.pallet_parts, }) @@ -252,7 +252,7 @@ fn decl_outer_dispatch<'a>( let pallets_tokens = pallet_declarations .filter(|pallet_declaration| pallet_declaration.exists_part("Call")) .map(|pallet_declaration| { - let pallet = &pallet_declaration.pallet.inner.segments.last().unwrap(); + let pallet = &pallet_declaration.path.inner.segments.last().unwrap(); let name = &pallet_declaration.name; let index = pallet_declaration.index; quote!(#[codec(index = #index)] #pallet::#name) @@ -275,7 +275,7 @@ fn decl_all_pallets<'a>( let mut names = Vec::new(); for pallet_declaration in pallet_declarations { let type_name = &pallet_declaration.name; - let pallet = &pallet_declaration.pallet; + let pallet = &pallet_declaration.path; let mut generics = vec![quote!(#runtime)]; generics.extend( pallet_declaration diff --git 
a/frame/support/procedural/src/construct_runtime/parse.rs b/frame/support/procedural/src/construct_runtime/parse.rs index 390729865e98d..2d242749cfe01 100644 --- a/frame/support/procedural/src/construct_runtime/parse.rs +++ b/frame/support/procedural/src/construct_runtime/parse.rs @@ -156,7 +156,7 @@ pub struct PalletDeclaration { pub name: Ident, /// Optional fixed index (e.g. `MyPallet ... = 3,`) pub index: Option, - pub pallet: PalletPath, + pub path: PalletPath, pub instance: Option, pub pallet_parts: Vec, } @@ -165,7 +165,7 @@ impl Parse for PalletDeclaration { fn parse(input: ParseStream) -> Result { let name = input.parse()?; let _: Token![:] = input.parse()?; - let pallet = input.parse()?; + let path = input.parse()?; let instance = if input.peek(Token![<]) { let _: Token![<] = input.parse()?; let res = Some(input.parse()?); @@ -189,7 +189,7 @@ impl Parse for PalletDeclaration { let parsed = Self { name, - pallet, + path, instance, pallet_parts, index, @@ -247,30 +247,6 @@ impl Parse for PalletPath { } } -impl PalletPath { - /// Return the snake-cased module name for this path. 
- pub fn mod_name(&self) -> Ident { - let mut iter = self.inner.segments.iter(); - let mut mod_name = match &iter.next().expect("Path should always have 1 segment; qed").ident { - ident if ident == "self" || ident == "super" || ident == "crate" => { - // Skip `crate`, `self` and `super` quasi-keywords when creating the module name - iter.next() - .expect("There must be a path segment pointing to a pallet following \ - `crate`, `self` or `super`; qed") - .ident - .clone() - } - ident => ident.clone(), - }; - - for segment in iter { - mod_name = quote::format_ident!("{}_{}", mod_name, segment.ident); - } - - mod_name - } -} - impl quote::ToTokens for PalletPath { fn to_tokens(&self, tokens: &mut TokenStream) { self.inner.to_tokens(tokens); diff --git a/frame/support/test/tests/construct_runtime.rs b/frame/support/test/tests/construct_runtime.rs index 6b0a7091edff9..7858595108b0e 100644 --- a/frame/support/test/tests/construct_runtime.rs +++ b/frame/support/test/tests/construct_runtime.rs @@ -383,31 +383,31 @@ fn origin_codec() { let origin = OriginCaller::system(system::RawOrigin::None); assert_eq!(origin.encode()[0], 30); - let origin = OriginCaller::module1_Instance1(module1::Origin(Default::default())); + let origin = OriginCaller::Module1_1(module1::Origin(Default::default())); assert_eq!(origin.encode()[0], 31); - let origin = OriginCaller::module2(module2::Origin); + let origin = OriginCaller::Module2(module2::Origin); assert_eq!(origin.encode()[0], 32); - let origin = OriginCaller::module1_Instance2(module1::Origin(Default::default())); + let origin = OriginCaller::Module1_2(module1::Origin(Default::default())); assert_eq!(origin.encode()[0], 33); - let origin = OriginCaller::nested_module3(nested::module3::Origin); + let origin = OriginCaller::NestedModule3(nested::module3::Origin); assert_eq!(origin.encode()[0], 34); - let origin = OriginCaller::module3(module3::Origin(Default::default())); + let origin = 
OriginCaller::Module3(module3::Origin(Default::default())); assert_eq!(origin.encode()[0], 35); - let origin = OriginCaller::module1_Instance6(module1::Origin(Default::default())); + let origin = OriginCaller::Module1_6(module1::Origin(Default::default())); assert_eq!(origin.encode()[0], 1); - let origin = OriginCaller::module1_Instance7(module1::Origin(Default::default())); + let origin = OriginCaller::Module1_7(module1::Origin(Default::default())); assert_eq!(origin.encode()[0], 2); - let origin = OriginCaller::module1_Instance8(module1::Origin(Default::default())); + let origin = OriginCaller::Module1_8(module1::Origin(Default::default())); assert_eq!(origin.encode()[0], 12); - let origin = OriginCaller::module1_Instance9(module1::Origin(Default::default())); + let origin = OriginCaller::Module1_9(module1::Origin(Default::default())); assert_eq!(origin.encode()[0], 13); } diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index 077763ac9128d..d952fd82eb0df 100644 --- a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -299,26 +299,26 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic sp_io::TestExternalities { GenesisConfig{ - module1_Instance1: module1::GenesisConfig { + module_1_1: module1::GenesisConfig { value: 3, test: 2, }, - module1_Instance2: module1::GenesisConfig { + module_1_2: module1::GenesisConfig { value: 4, test: 5, }, - module2: module2::GenesisConfig { + module_2: module2::GenesisConfig { value: 4, map: vec![(0, 0)], double_map: vec![(0, 0, 0)], }, - module2_Instance1: module2::GenesisConfig { + module_2_1: module2::GenesisConfig { value: 4, map: vec![(0, 0)], double_map: vec![(0, 0, 0)], }, - module2_Instance2: Default::default(), - module2_Instance3: Default::default(), + module_2_2: Default::default(), + module_2_3: Default::default(), }.build_storage().unwrap().into() } diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index 
7478da189df07..f7e04e9226874 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -485,7 +485,7 @@ fn transactional_works() { pallet::Call::::foo_transactional(1).dispatch_bypass_filter(None.into()).unwrap(); assert_eq!( frame_system::Pallet::::events().iter().map(|e| &e.event).collect::>(), - vec![&Event::pallet(pallet::Event::Something(0))], + vec![&Event::Example(pallet::Event::Something(0))], ); }) } @@ -550,7 +550,7 @@ fn pallet_expand_deposit_event() { pallet::Call::::foo(3, 0).dispatch_bypass_filter(None.into()).unwrap(); assert_eq!( frame_system::Pallet::::events()[0].event, - Event::pallet(pallet::Event::Something(3)), + Event::Example(pallet::Event::Something(3)), ); }) } @@ -643,15 +643,15 @@ fn pallet_hooks_expand() { assert_eq!( frame_system::Pallet::::events()[0].event, - Event::pallet(pallet::Event::Something(10)), + Event::Example(pallet::Event::Something(10)), ); assert_eq!( frame_system::Pallet::::events()[1].event, - Event::pallet(pallet::Event::Something(20)), + Event::Example(pallet::Event::Something(20)), ); assert_eq!( frame_system::Pallet::::events()[2].event, - Event::pallet(pallet::Event::Something(30)), + Event::Example(pallet::Event::Something(30)), ); }) } diff --git a/frame/support/test/tests/pallet_instance.rs b/frame/support/test/tests/pallet_instance.rs index 846a96a237c95..48ff166c5b226 100644 --- a/frame/support/test/tests/pallet_instance.rs +++ b/frame/support/test/tests/pallet_instance.rs @@ -394,7 +394,7 @@ fn pallet_expand_deposit_event() { pallet::Call::::foo(3).dispatch_bypass_filter(None.into()).unwrap(); assert_eq!( frame_system::Pallet::::events()[0].event, - Event::pallet(pallet::Event::Something(3)), + Event::Example(pallet::Event::Something(3)), ); }); @@ -403,7 +403,7 @@ fn pallet_expand_deposit_event() { pallet::Call::::foo(3).dispatch_bypass_filter(None.into()).unwrap(); assert_eq!( frame_system::Pallet::::events()[0].event, - 
Event::pallet_Instance1(pallet::Event::Something(3)), + Event::Instance1Example(pallet::Event::Something(3)), ); }); } @@ -539,27 +539,27 @@ fn pallet_hooks_expand() { // The order is indeed reversed due to https://github.com/paritytech/substrate/issues/6280 assert_eq!( frame_system::Pallet::::events()[0].event, - Event::pallet_Instance1(pallet::Event::Something(11)), + Event::Instance1Example(pallet::Event::Something(11)), ); assert_eq!( frame_system::Pallet::::events()[1].event, - Event::pallet(pallet::Event::Something(10)), + Event::Example(pallet::Event::Something(10)), ); assert_eq!( frame_system::Pallet::::events()[2].event, - Event::pallet_Instance1(pallet::Event::Something(21)), + Event::Instance1Example(pallet::Event::Something(21)), ); assert_eq!( frame_system::Pallet::::events()[3].event, - Event::pallet(pallet::Event::Something(20)), + Event::Example(pallet::Event::Something(20)), ); assert_eq!( frame_system::Pallet::::events()[4].event, - Event::pallet_Instance1(pallet::Event::Something(31)), + Event::Instance1Example(pallet::Event::Something(31)), ); assert_eq!( frame_system::Pallet::::events()[5].event, - Event::pallet(pallet::Event::Something(30)), + Event::Example(pallet::Event::Something(30)), ); }) } diff --git a/frame/tips/src/tests.rs b/frame/tips/src/tests.rs index 6b144273ca828..6063f0954bd8a 100644 --- a/frame/tips/src/tests.rs +++ b/frame/tips/src/tests.rs @@ -176,7 +176,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { fn last_event() -> RawEvent { System::events().into_iter().map(|r| r.event) .filter_map(|e| { - if let Event::tips(inner) = e { Some(inner) } else { None } + if let Event::TipsModTestInst(inner) = e { Some(inner) } else { None } }) .last() .unwrap() diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 2b1ad2db9ae09..1ce3f75d5a016 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -1177,9 +1177,9 @@ mod tests { ); 
assert_eq!(Balances::free_balance(2), 0); // Transfer Event - System::assert_has_event(Event::pallet_balances(pallet_balances::Event::Transfer(2, 3, 80))); + System::assert_has_event(Event::Balances(pallet_balances::Event::Transfer(2, 3, 80))); // Killed Event - System::assert_has_event(Event::system(system::Event::KilledAccount(2))); + System::assert_has_event(Event::System(system::Event::KilledAccount(2))); }); } diff --git a/frame/transaction-storage/src/mock.rs b/frame/transaction-storage/src/mock.rs index 351893c08a33b..03dacf8a98e89 100644 --- a/frame/transaction-storage/src/mock.rs +++ b/frame/transaction-storage/src/mock.rs @@ -102,11 +102,11 @@ impl pallet_transaction_storage::Config for Test { pub fn new_test_ext() -> sp_io::TestExternalities { let t = GenesisConfig { - frame_system: Default::default(), - pallet_balances: pallet_balances::GenesisConfig:: { + system: Default::default(), + balances: pallet_balances::GenesisConfig:: { balances: vec![(1, 1000000000), (2, 100), (3, 100), (4, 100)] }, - pallet_transaction_storage: pallet_transaction_storage::GenesisConfig:: { + transaction_storage: pallet_transaction_storage::GenesisConfig:: { storage_period: 10, byte_fee: 2, entry_fee: 200, From a2f48bf96eecbb5cd1f45bd5319ba814595eaaef Mon Sep 17 00:00:00 2001 From: Squirrel Date: Wed, 9 Jun 2021 10:56:31 +0100 Subject: [PATCH 22/61] Stop sending network_state to telemetry (#9026) (We send network information to prometheus) --- client/service/src/metrics.rs | 25 +------------------------ 1 file changed, 1 insertion(+), 24 deletions(-) diff --git a/client/service/src/metrics.rs b/client/service/src/metrics.rs index 516fb243557cf..8fc48ccf8c863 100644 --- a/client/service/src/metrics.rs +++ b/client/service/src/metrics.rs @@ -27,7 +27,7 @@ use sp_runtime::traits::{NumberFor, Block, SaturatedConversion, UniqueSaturatedI use sp_transaction_pool::{PoolStatus, MaintainedTransactionPool}; use sp_utils::metrics::register_globals; use sc_client_api::{ClientInfo, 
UsageProvider}; -use sc_network::{config::Role, NetworkStatus, NetworkService, network_state::NetworkState}; +use sc_network::{config::Role, NetworkStatus, NetworkService}; use std::sync::Arc; use std::time::Duration; use wasm_timer::Instant; @@ -171,30 +171,18 @@ impl MetricsService { let mut timer = Delay::new(Duration::from_secs(0)); let timer_interval = Duration::from_secs(5); - let net_state_duration = Duration::from_secs(30); - let mut last_net_state = Instant::now(); - loop { // Wait for the next tick of the timer. (&mut timer).await; - let now = Instant::now(); - let from_net_state = now.duration_since(last_net_state); // Try to get the latest network information. let net_status = network.status().await.ok(); - let net_state = if from_net_state >= net_state_duration { - last_net_state = now; - network.network_state().await.ok() - } else { - None - }; // Update / Send the metrics. self.update( &client.usage_info(), &transactions.status(), net_status, - net_state, ); // Schedule next tick. @@ -207,7 +195,6 @@ impl MetricsService { info: &ClientInfo, txpool_status: &PoolStatus, net_status: Option>, - net_state: Option, ) { let now = Instant::now(); let elapsed = (now - self.last_update).as_secs(); @@ -300,15 +287,5 @@ impl MetricsService { } } } - - // Send network state information, if any. 
- if let Some(net_state) = net_state { - telemetry!( - self.telemetry; - SUBSTRATE_INFO; - "system.network_state"; - "state" => net_state, - ); - } } } From 4d64381801d6df6567d261f319b2c5981a692f72 Mon Sep 17 00:00:00 2001 From: radupopa2010 Date: Wed, 9 Jun 2021 15:51:27 +0200 Subject: [PATCH 23/61] READY Update simnet tests to v5 (#8946) * Update simnet tests to v4 * enable simnet tests for PRs * add stage to job "test-linux-stable-int" * v2.0.0simnet * alow build-for-simnet option * Fix passing of IMAGE_TAG to downstream * forgot to build-for-simnet * build-for-simnet * build-for-simnet * build-for-simnet * build-for-simnet * build-for-simnet * take a shortcut build-for-simnet * build-for-simnet * update triggering script to polkadot version * "revert me" * "revert me" build-for-simnet * add simnet version as arg to script * revert me build-for-simnet * build-for-simnet * remove triggering simnet for PRs for now * Add suggestions from Vladimir * Add suggestions from Vladimir --- .gitlab-ci.yml | 11 +- .maintain/gitlab/trigger_pipeline.sh | 230 ++++++++++++++++++++++----- 2 files changed, 197 insertions(+), 44 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 03fe9f8a2dcab..9b28bb2e25a88 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -355,6 +355,7 @@ test-frame-examples-compile-to-wasm: test-linux-stable-int: <<: *test-linux + stage: test script: - echo "___Logs will be partly shown at the end in case of failure.___" - echo "___Full log will be saved to the job artifacts only in case of failure.___" @@ -567,9 +568,11 @@ build-rust-doc: - buildah push --format=v2s2 "$IMAGE_NAME:latest" after_script: - buildah logout "$IMAGE_NAME" - # pass artifacts to the trigger-simnet job - - echo "IMAGE_NAME=${IMAGE_NAME}" > ./artifacts/$PRODUCT/build.env - - echo "IMAGE_TAG=${VERSION}" >> ./artifacts/$PRODUCT/build.env + # pass artifacts to the trigger-simnet job + - echo "IMAGE_NAME=${IMAGE_NAME}" | tee -a ./artifacts/$PRODUCT/build.env + - IMAGE_TAG="$(cat 
./artifacts/$PRODUCT/VERSION)" + - echo "IMAGE_TAG=${IMAGE_TAG}" | tee -a ./artifacts/$PRODUCT/build.env + - cat ./artifacts/$PRODUCT/build.env publish-docker-substrate: stage: publish @@ -708,4 +711,4 @@ trigger-simnet: DWNSTRM_ID: 332 script: # API trigger for a simnet job - - .maintain/gitlab/trigger_pipeline.sh + - .maintain/gitlab/trigger_pipeline.sh --simnet-version=${SIMNET_REF} diff --git a/.maintain/gitlab/trigger_pipeline.sh b/.maintain/gitlab/trigger_pipeline.sh index 0e95a6458e4d7..3ed9215405afb 100755 --- a/.maintain/gitlab/trigger_pipeline.sh +++ b/.maintain/gitlab/trigger_pipeline.sh @@ -1,30 +1,181 @@ #!/bin/bash -set -eu - -# API trigger another project's pipeline -echo "Triggering Simnet pipeline." - -curl --silent \ - -X POST \ - -F "token=${CI_JOB_TOKEN}" \ - -F "ref=v3" `# trigger the pinned version of simnet CI config` \ - -F "variables[TRGR_PROJECT]=${TRGR_PROJECT}" \ - -F "variables[TRGR_REF]=${TRGR_REF}" \ - -F "variables[IMAGE_NAME]=${IMAGE_NAME}" \ - -F "variables[IMAGE_TAG]=${IMAGE_TAG}" \ - "https://${CI_SERVER_HOST}/api/v4/projects/${DWNSTRM_ID}/trigger/pipeline" | \ - tee pipeline - -PIPELINE_ID=$(cat pipeline | jq ".id") -PIPELINE_URL=$(cat pipeline | jq ".web_url") -echo -echo "Simnet pipeline ${PIPELINE_URL} was successfully triggered." -echo "Now we're polling it to obtain the distinguished status." - -# This is a workaround for a Gitlab bug, waits here until -# https://gitlab.com/gitlab-org/gitlab/-/issues/326137 gets fixed. -# The timeout is 360 curls with 8 sec interval, roughly an hour. +set -eou pipefail + +# This script is to trigger Simnet pipeline. +# See help article for more details. + +SCRIPT_NAME="$0" +SCRIPT_PATH=$(dirname "$0") # relative +SCRIPT_PATH=$(cd "${SCRIPT_PATH}" && pwd) # absolutized and normalized +SIMNET_VERSION="" + +function usage { + cat << EOF +This script is to trigger Simnet pipeline. +It's designed to be launched locally and from CI. +The required argumants for both cases are listed below. 
+ +Usage: ${SCRIPT_NAME} OPTION + +OPTIONS + + -h, --help Print this help message. + + Mandatory in both cases: + + -s, --simnet-version Simnet version to trigger. + E.g.: v4 + + -u, --upstream-project Triggering project. + E.g.: substrate + + -r, --upstream-ref The branch or tag name for which project is built. + E.g.: master + + -d, --downstream-id Downstream project's ID to trigger. + E.g.: 332 (simnet project id) + + -n, --image-name Name of image to test. + E.g.: docker.io/paritypr/synth-wave + + -i, --image-tag Tag of the image to test. + E.g.: master + + -c, --collator-image-tag Tag of collator image. Image name is hardcoded. + E.g.: master + + Required for local launch: + + -g, --ci-server-fqdn FQDN of your gitlab server. + E.g.: gitlab.parity.io + + -t, --trigger-token Gitlab trigger token. This must be defined in + project -> settings -> CI/CD -> Pipeline triggers + Defaults to CI_JOB_TOKEN + https://stackoverflow.com/questions/42746634/gitlab-trigger-api-returns-404 + + -a, --access-token Gitlab peronal access token or it defaults to + PIPELINE_TOKEN (gitlab variable) + https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html + +EXAMPLES + ${SCRIPT_NAME} -s v4 + ${SCRIPT_NAME} --simnet-version=v4 + + Local test example. You need to set the 2 vars before running: TR_TOKEN and PERS_TOKEN + ${SCRIPT_NAME} --simnet-version=v4 \\ + --upstream-project=substrate \\ + --upstream-ref=master \\ + --image-name=docker.io/paritypr/synth-wave \\ + --image-tag=master \\ + --collator-image-tag=master \\ + --ci-server-fqdn=gitlab.parity.io \\ + --downstream-id=332 \\ + --trigger-token="\${TR_TOKEN}" \\ + --access-token="\${PERS_TOKEN}" +EOF +} + +function main { + # Main entry point for the script. 
+ parse_args "$@" + check_args + trigger_pipeline + check_pipeline + poll_pipeline +} + +function parse_args { + # shellcheck disable=SC2214 + while getopts c:u:r:i:n:g:t:r:a:s:h-: OPT; do + # support long options: https://stackoverflow.com/a/28466267/519360 + if [ "${OPT}" = "-" ]; then # long option: reformulate OPT and OPTARG + OPT="${OPTARG%%=*}" # extract long option name + OPTARG="${OPTARG#$OPT}" # extract long option argument (may be empty) + OPTARG="${OPTARG#=}" # if long option argument, remove assigning `=` + fi + case "${OPT}" in + h | help ) usage ; exit 0 ;; + s | simnet-version ) needs_arg ; SIMNET_VERSION="${OPTARG}" ;; + u | upstream-project ) needs_arg ; TRGR_PROJECT="${OPTARG}" ;; + r | upstream-ref ) needs_arg ; TRGR_REF="${OPTARG}" ;; + n | image-name ) needs_arg ; IMAGE_NAME="${OPTARG}" ;; + i | image-tag ) needs_arg ; IMAGE_TAG="${OPTARG}" ;; + c | collator-image-tag ) needs_arg ; COLLATOR_IMAGE_TAG="${OPTARG}" ;; + g | ci-server-fqdn ) needs_arg ; CI_SERVER_HOST="${OPTARG}" ;; + d | downstream-id ) needs_arg ; DWNSTRM_ID="${OPTARG}" ;; + t | trigger-token ) needs_arg ; CI_JOB_TOKEN="${OPTARG}" ;; + a | access-token ) needs_arg ; PIPELINE_TOKEN="${OPTARG}" ;; + ??* ) log DIE "Illegal option --${OPT}" ;; # bad long option + ? ) exit 2 ;; # bad short option (error reported via getopts) + esac + done + shift $((OPTIND-1)) # remove parsed options and args from $@ list + +} + +function check_args { + if [[ -z "${SIMNET_VERSION}" ]] ; then + log DIE "Must specify value for mandatory argument -s,--simnet-version + +$(usage)" + fi +} + +function needs_arg { + if [ -z "${OPTARG}" ]; then + log DIE "No arg for --${OPT} option" + fi +} + +function trigger_pipeline { + # API trigger another project's pipeline. + log INFO "Triggering Simnet pipeline." 
+ + curl --silent \ + -X POST \ + -F "token=${CI_JOB_TOKEN}" \ + -F "ref=${SIMNET_VERSION}" \ + -F "variables[TRGR_PROJECT]=${TRGR_PROJECT}" \ + -F "variables[TRGR_REF]=${TRGR_REF}" \ + -F "variables[IMAGE_NAME]=${IMAGE_NAME}" \ + -F "variables[IMAGE_TAG]=${IMAGE_TAG}" \ + "https://${CI_SERVER_HOST}/api/v4/projects/${DWNSTRM_ID}/trigger/pipeline" | \ + tee pipeline; +} + +function check_pipeline { + PIPELINE_ID=$(jq ".id" pipeline) + PIPELINE_URL=$(jq ".web_url" pipeline) + echo + log INFO "Simnet pipeline ${PIPELINE_URL} was successfully triggered." + log INFO "Now we're polling it to obtain the distinguished status." +} + +function poll_pipeline { + # This is a workaround for a Gitlab bug, waits here until + # https://gitlab.com/gitlab-org/gitlab/-/issues/326137 gets fixed. + # The timeout is 360 curls with 8 sec interval, roughly an hour. + log INFO "Waiting on ${PIPELINE_ID} status..." + +# shellcheck disable=SC2034 + for i in {1..360}; do + STATUS=$(get_status); + log INFO "Triggered pipeline status is ${STATUS}"; + if [[ ${STATUS} =~ ^(pending|running|created)$ ]]; then + echo; + elif [[ ${STATUS} =~ ^(failed|canceled|skipped|manual)$ ]]; then + log DIE "Something's broken in: ${PIPELINE_URL}"; + elif [[ ${STATUS} =~ ^(success)$ ]]; then + log INFO "Look how green it is: ${PIPELINE_URL}" + exit 0 + else + log DIE "Something else has happened in ${PIPELINE_URL}" + fi + sleep 8; + done +} function get_status() { curl --silent \ @@ -33,19 +184,18 @@ function get_status() { jq --raw-output ".status"; } -echo "Waiting on ${PIPELINE_ID} status..." 
+function log { + local lvl msg fmt + lvl=$1 msg=$2 + fmt='+%Y-%m-%d %H:%M:%S' + lg_date=$(date "${fmt}") + if [[ "${lvl}" = "DIE" ]] ; then + lvl="ERROR" + echo "${lg_date} - ${lvl} - ${msg}" + exit 1 + else + echo "${lg_date} - ${lvl} - ${msg}" + fi +} -for i in $(seq 1 360); do - STATUS=$(get_status); - echo "Triggered pipeline status is ${STATUS}"; - if [[ ${STATUS} =~ ^(pending|running|created)$ ]]; then - echo; - elif [[ ${STATUS} =~ ^(failed|canceled|skipped|manual)$ ]]; then - echo "Something's broken in: ${PIPELINE_URL}"; exit 1; - elif [[ ${STATUS} =~ ^(success)$ ]]; then - echo "Look how green it is: ${PIPELINE_URL}"; exit 0; - else - echo "Something else has happened in ${PIPELINE_URL}"; exit 1; - fi -sleep 8; -done +main "$@" From 3e5b4a2444c22561f5d4a995dec28d7ddc01865f Mon Sep 17 00:00:00 2001 From: Joshy Orndorff Date: Wed, 9 Jun 2021 10:36:41 -0400 Subject: [PATCH 24/61] remove explicit unit return type (#9053) --- frame/support/procedural/src/pallet/expand/event.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/support/procedural/src/pallet/expand/event.rs b/frame/support/procedural/src/pallet/expand/event.rs index c4f7aeffa7367..204b5a23611cc 100644 --- a/frame/support/procedural/src/pallet/expand/event.rs +++ b/frame/support/procedural/src/pallet/expand/event.rs @@ -133,7 +133,7 @@ pub fn expand_event(def: &mut Def) -> proc_macro2::TokenStream { #deposit_event impl<#event_impl_gen> From<#event_ident<#event_use_gen>> for () #event_where_clause { - fn from(_: #event_ident<#event_use_gen>) -> () { () } + fn from(_: #event_ident<#event_use_gen>) {} } impl<#event_impl_gen> #event_ident<#event_use_gen> #event_where_clause { From eb9033b826b9e8115c20707fe66af0ceb177e99c Mon Sep 17 00:00:00 2001 From: Zeke Mostov <32168567+emostov@users.noreply.github.com> Date: Wed, 9 Jun 2021 15:05:28 -0700 Subject: [PATCH 25/61] [try-runtime-cli] Offchain worker support (#8966) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit * make remote-ext work with ws and safe RPCs * Update docs. * Update utils/frame/remote-externalities/Cargo.toml Co-authored-by: Niklas Adolfsson * Fix test * Update lock file * Update utils/frame/remote-externalities/src/lib.rs Co-authored-by: Bastian Köcher * Fix build again. * checkpoint, merging the paged rpc now * revert lifetime stuff * WIP: remote client init not working * Small cleanups * use jsonrpsee alpha.7 * WIP * Executiing without errors * Reorg & cleanup * Trivial cleaning * Add txpool & keystore extension * Small cleaning * More :cleaning * Flags: page-size, override-code * WIP * Apply suggestions from code review Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Remove heap_pages * Dry code extraction from state * Formatting * More formatting * Add issue todo * Use jsonrpsee 0.2.0 * Try trigger gitlab * Fix "block_import_works" test * fix native_big_block_import_fails_on_fallback test * fix commit should work * Rewrite UI tests * Revert "Rewrite UI tests" This reverts commit ada7f670f701c21fb399946a3f6918453f537bcb. 
* try again with UI * Use const for legacy heap pages val * Move parse module to its own file * Move rpc_api module to its own file * Apply suggestions from code review Co-authored-by: Peter Goodspeed-Niklaus * trait names: Block, not B * Corect HEAP_PAGES_TEST_LEGACY export * Update utils/frame/remote-externalities/src/rpc_api.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Revert test_ext heap_page insert; adjust storage root instead * Doc comments for try_runtime::cli::Command * TryRuntime stub * trailing comma * Remove unused dev dep in frame-executive * Improve parse::hash variable name & error index * Use Result for rpc_api fns * Richer err messagges * Remove HEAP_PAGE_TEST_LEGACY * Update bin/node/executor/tests/basic.rs Co-authored-by: kianenigma Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Niklas Adolfsson Co-authored-by: Bastian Köcher Co-authored-by: Peter Goodspeed-Niklaus --- Cargo.lock | 6 + bin/node/cli/src/cli.rs | 7 +- bin/node/cli/src/command.rs | 7 +- bin/node/executor/tests/basic.rs | 3 + frame/executive/src/lib.rs | 2 +- primitives/state-machine/src/testing.rs | 7 +- utils/frame/remote-externalities/Cargo.toml | 2 +- utils/frame/remote-externalities/src/lib.rs | 27 +- .../frame/remote-externalities/src/rpc_api.rs | 53 +++ utils/frame/try-runtime/cli/Cargo.toml | 5 +- utils/frame/try-runtime/cli/src/lib.rs | 397 ++++++++++++------ utils/frame/try-runtime/cli/src/parse.rs | 44 ++ 12 files changed, 421 insertions(+), 139 deletions(-) create mode 100644 utils/frame/remote-externalities/src/rpc_api.rs create mode 100644 utils/frame/try-runtime/cli/src/parse.rs diff --git a/Cargo.lock b/Cargo.lock index cc8557daad2f4..a6c7873f6f066 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,5 +1,7 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
+version = 3 + [[package]] name = "Inflector" version = "0.11.4" @@ -6787,6 +6789,7 @@ dependencies = [ "log", "pallet-elections-phragmen", "parity-scale-codec", + "serde", "serde_json", "sp-core", "sp-io", @@ -10647,14 +10650,17 @@ dependencies = [ "log", "parity-scale-codec", "remote-externalities", + "sc-chain-spec", "sc-cli", "sc-client-api", "sc-executor", "sc-service", + "serde", "sp-api", "sp-blockchain", "sp-core", "sp-externalities", + "sp-keystore", "sp-runtime", "sp-state-machine", "structopt", diff --git a/bin/node/cli/src/cli.rs b/bin/node/cli/src/cli.rs index 9b80a3e345290..11ea58f4068df 100644 --- a/bin/node/cli/src/cli.rs +++ b/bin/node/cli/src/cli.rs @@ -47,11 +47,14 @@ pub enum Subcommand { #[structopt(name = "benchmark", about = "Benchmark runtime pallets.")] Benchmark(frame_benchmarking_cli::BenchmarkCmd), - /// Try some experimental command on the runtime. This includes migration and runtime-upgrade - /// testing. + /// Try some command against runtime state. #[cfg(feature = "try-runtime")] TryRuntime(try_runtime_cli::TryRuntimeCmd), + /// Try some command against runtime state. Note: `try-runtime` feature must be enabled. + #[cfg(not(feature = "try-runtime"))] + TryRuntime, + /// Verify a signature for a message, provided on STDIN, with a given (public or secret) key. Verify(VerifyCmd), diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index ece97436bfdf4..1ef1da6ba6819 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -163,6 +163,11 @@ pub fn run() -> Result<()> { Ok((cmd.run::(config), task_manager)) }) - } + }, + #[cfg(not(feature = "try-runtime"))] + Some(Subcommand::TryRuntime) => { + Err("TryRuntime wasn't enabled when building the node. 
\ + You can enable it with `--features try-runtime`.".into()) + }, } } diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index 8c7b1eae5dec1..af9843715f135 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -697,6 +697,9 @@ fn native_big_block_import_succeeds() { fn native_big_block_import_fails_on_fallback() { let mut t = new_test_ext(compact_code_unwrap(), false); + // We set the heap pages to 8 because we know that should give an OOM in WASM with the given block. + set_heap_pages(&mut t.ext(), 8); + assert!( executor_call:: _>( &mut t, diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 593b8db92c60d..d8004e14acda1 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -835,7 +835,7 @@ mod tests { header: Header { parent_hash: [69u8; 32].into(), number: 1, - state_root: hex!("6e70de4fa07bac443dc7f8a812c8a0c941aacfa892bb373c5899f7d511d4c25b").into(), + state_root: hex!("ec6bb58b0e4bc7fdf0151a0f601eb825f529fbf90b5be5b2024deba30c5cbbcb").into(), extrinsics_root: hex!("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").into(), digest: Digest { logs: vec![], }, }, diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 250c2fd4e9a98..363d543da086f 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -30,12 +30,12 @@ use crate::{ }, }; -use codec::{Decode, Encode}; +use codec::Decode; use hash_db::Hasher; use sp_core::{ offchain::testing::TestPersistentOffchainDB, storage::{ - well_known_keys::{CHANGES_TRIE_CONFIG, CODE, HEAP_PAGES, is_child_storage_key}, + well_known_keys::{CHANGES_TRIE_CONFIG, CODE, is_child_storage_key}, Storage, }, traits::TaskExecutorExt, @@ -103,7 +103,6 @@ where assert!(storage.top.keys().all(|key| !is_child_storage_key(key))); assert!(storage.children_default.keys().all(|key| is_child_storage_key(key))); - 
storage.top.insert(HEAP_PAGES.to_vec(), 8u64.encode()); storage.top.insert(CODE.to_vec(), code.to_vec()); let mut extensions = Extensions::default(); @@ -308,7 +307,7 @@ mod tests { ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); - let root = H256::from(hex!("2a340d3dfd52f5992c6b117e9e45f479e6da5afffafeb26ab619cf137a95aeb8")); + let root = H256::from(hex!("ed4d8c799d996add422395a6abd7545491d40bd838d738afafa1b8a4de625489")); assert_eq!(H256::from_slice(ext.storage_root().as_slice()), root); } diff --git a/utils/frame/remote-externalities/Cargo.toml b/utils/frame/remote-externalities/Cargo.toml index 8f62d977baedd..a7519b7e47f33 100644 --- a/utils/frame/remote-externalities/Cargo.toml +++ b/utils/frame/remote-externalities/Cargo.toml @@ -20,8 +20,8 @@ hex = "0.4.0" env_logger = "0.8.2" log = "0.4.11" codec = { package = "parity-scale-codec", version = "2.0.0" } - serde_json = "1.0" +serde = "1.0.0" sp-io = { version = "3.0.0", path = "../../../primitives/io" } sp-core = { version = "3.0.0", path = "../../../primitives/core" } diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index 3ea97fc9d365b..a77650d042125 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -34,9 +34,11 @@ use sp_core::{ use codec::{Encode, Decode}; use sp_runtime::traits::Block as BlockT; use jsonrpsee_ws_client::{ - WsClientBuilder, WsClient, v2::params::JsonRpcParams, traits::Client, + WsClientBuilder, WsClient, v2::params::JsonRpcParams, }; +pub mod rpc_api; + type KeyPair = (StorageKey, StorageData); const LOG_TARGET: &str = "remote-ext"; @@ -72,7 +74,7 @@ impl Default for Mode { } } -/// configuration of the online execution. +/// Configuration of the offline execution. /// /// A state snapshot config must be present. 
#[derive(Clone)] @@ -81,7 +83,7 @@ pub struct OfflineConfig { pub state_snapshot: SnapshotConfig, } -/// Description of the transport protocol. +/// Description of the transport protocol (for online execution). #[derive(Debug)] pub struct Transport { uri: String, @@ -115,10 +117,17 @@ pub struct OnlineConfig { pub transport: Transport, } +impl OnlineConfig { + /// Return rpc (ws) client. + fn rpc_client(&self) -> &WsClient { + self.transport.client.as_ref().expect("ws client must have been initialized by now; qed.") + } +} + impl Default for OnlineConfig { fn default() -> Self { Self { - transport: Transport { uri: DEFAULT_TARGET.to_string(), client: None }, + transport: Transport { uri: DEFAULT_TARGET.to_owned(), client: None }, at: None, state_snapshot: None, modules: vec![], @@ -126,12 +135,6 @@ impl Default for OnlineConfig { } } -impl OnlineConfig { - /// Return rpc (ws) client. - fn rpc_client(&self) -> &WsClient { - self.transport.client.as_ref().expect("ws client must have been initialized by now; qed.") - } -} /// Configuration of the state snapshot. #[derive(Clone)] @@ -189,6 +192,7 @@ impl Builder { // RPC methods impl Builder { + /// Get the latest finalized head. 
async fn rpc_get_head(&self) -> Result { trace!(target: LOG_TARGET, "rpc: finalized_head"); RpcApi::::finalized_head(self.as_online().rpc_client()).await.map_err(|e| { @@ -250,6 +254,7 @@ impl Builder { prefix: StorageKey, at: B::Hash, ) -> Result, &'static str> { + use jsonrpsee_ws_client::traits::Client; use serde_json::to_value; let keys = self.get_keys_paged(prefix, at).await?; let keys_count = keys.len(); @@ -438,8 +443,10 @@ impl Builder { info!(target: LOG_TARGET, "injecting a total of {} keys", kv.len()); for (k, v) in kv { let (k, v) = (k.0, v.0); + // Insert the key,value pair into the test trie backend ext.insert(k, v); } + Ok(ext) } } diff --git a/utils/frame/remote-externalities/src/rpc_api.rs b/utils/frame/remote-externalities/src/rpc_api.rs new file mode 100644 index 0000000000000..e7fd021bac4a8 --- /dev/null +++ b/utils/frame/remote-externalities/src/rpc_api.rs @@ -0,0 +1,53 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! WS RPC API for one off RPC calls to a substrate node. 
+// TODO: Consolidate one off RPC calls https://github.com/paritytech/substrate/issues/8988 + +use super::*; + +/// Get the header of the block identified by `at` +pub async fn get_header>(from: S, at: B::Hash) -> Result +where + B::Header: serde::de::DeserializeOwned, +{ + use jsonrpsee_ws_client::traits::Client; + let at = serde_json::to_value(at) + .map_err(|e| format!("Block hash could not be converted to JSON due to {:?}", e))?; + let params = vec![at]; + let client = WsClientBuilder::default() + .max_request_body_size(u32::MAX) + .build(from.as_ref()) + .await + .map_err(|e| format!("`WsClientBuilder` failed to build do to {:?}", e))?; + client.request::("chain_getHeader", JsonRpcParams::Array(params)) + .await + .map_err(|e| format!("chain_getHeader request failed due to {:?}", e)) +} + +/// Get the finalized head +pub async fn get_finalized_head>(from: S) -> Result { + use jsonrpsee_ws_client::traits::Client; + let client = WsClientBuilder::default() + .max_request_body_size(u32::MAX) + .build(from.as_ref()) + .await + .map_err(|e| format!("`WsClientBuilder` failed to build do to {:?}", e))?; + client.request::("chain_getFinalizedHead", JsonRpcParams::NoParams) + .await + .map_err(|e| format!("chain_getFinalizedHead request failed due to {:?}", e)) +} diff --git a/utils/frame/try-runtime/cli/Cargo.toml b/utils/frame/try-runtime/cli/Cargo.toml index 4767d0db6783a..f262ba4812a0e 100644 --- a/utils/frame/try-runtime/cli/Cargo.toml +++ b/utils/frame/try-runtime/cli/Cargo.toml @@ -15,18 +15,21 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.8" parity-scale-codec = { version = "2.0.0" } +serde = "1.0.0" +structopt = "0.3.8" sc-service = { version = "0.9.0", default-features = false, path = "../../../../client/service" } sc-cli = { version = "0.9.0", path = "../../../../client/cli" } sc-executor = { version = "0.9.0", path = "../../../../client/executor" } sc-client-api = { version = "3.0.0", path = "../../../../client/api" } -structopt = 
"0.3.8" +sc-chain-spec = { version = "3.0.0", path = "../../../../client/chain-spec" } sp-state-machine = { version = "0.9.0", path = "../../../../primitives/state-machine" } sp-api = { version = "3.0.0", path = "../../../../primitives/api" } sp-blockchain = { version = "3.0.0", path = "../../../../primitives/blockchain" } sp-runtime = { version = "3.0.0", path = "../../../../primitives/runtime" } sp-externalities = { version = "0.9.0", path = "../../../../primitives/externalities" } sp-core = { version = "3.0.0", path = "../../../../primitives/core" } +sp-keystore = { version = "0.9.0", path = "../../../../primitives/keystore" } frame-try-runtime = { version = "0.9.0", path = "../../../../frame/try-runtime" } remote-externalities = { version = "0.9.0", path = "../../remote-externalities" } diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index c4adab3ce8f85..dc4cb7cd33dbd 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -15,23 +15,61 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! `Structopt`-ready struct for `try-runtime`. +//! `Structopt`-ready structs for `try-runtime`. 
-use parity_scale_codec::Decode; -use std::{fmt::Debug, path::PathBuf, str::FromStr}; +use parity_scale_codec::{Decode, Encode}; +use std::{fmt::Debug, path::PathBuf, str::FromStr, sync::Arc}; use sc_service::Configuration; use sc_cli::{CliConfiguration, ExecutionStrategy, WasmExecutionMethod}; use sc_executor::NativeExecutor; use sc_service::NativeExecutionDispatch; +use sc_chain_spec::ChainSpec; use sp_state_machine::StateMachine; use sp_runtime::traits::{Block as BlockT, NumberFor}; -use sp_core::storage::{StorageData, StorageKey, well_known_keys}; +use sp_core::{ + offchain::{ + OffchainWorkerExt, OffchainDbExt, TransactionPoolExt, + testing::{TestOffchainExt, TestTransactionPoolExt} + }, + storage::{StorageData, StorageKey, well_known_keys}, +}; +use sp_keystore::{KeystoreExt, testing::KeyStore}; +use remote_externalities::{Builder, Mode, SnapshotConfig, OfflineConfig, OnlineConfig, rpc_api}; -/// Various commands to try out the new runtime, over configurable states. -/// -/// For now this only assumes running the `on_runtime_upgrade` hooks. -#[derive(Debug, structopt::StructOpt)] -pub struct TryRuntimeCmd { +mod parse; + +/// Possible subcommands of `try-runtime`. +#[derive(Debug, Clone, structopt::StructOpt)] +pub enum Command { + /// Execute "TryRuntime_on_runtime_upgrade" against the given runtime state. + OnRuntimeUpgrade(OnRuntimeUpgradeCmd), + /// Execute "OffchainWorkerApi_offchain_worker" against the given runtime state. + OffchainWorker(OffchainWorkerCmd), +} + +#[derive(Debug, Clone, structopt::StructOpt)] +pub struct OnRuntimeUpgradeCmd { + #[structopt(subcommand)] + pub state: State, +} + +#[derive(Debug, Clone, structopt::StructOpt)] +pub struct OffchainWorkerCmd { + /// Hash of the block whose header to use to execute the offchain worker. 
+ #[structopt(short, long, multiple = false, parse(try_from_str = parse::hash))] + pub header_at: String, + + #[structopt(subcommand)] + pub state: State, + + /// Whether or not to overwrite the code from state with the code from + /// the specified chain spec. + #[structopt(long)] + pub overwrite_code: bool, +} + +#[derive(Debug, Clone, structopt::StructOpt)] +pub struct SharedParams { /// The shared parameters #[allow(missing_docs)] #[structopt(flatten)] @@ -43,7 +81,7 @@ pub struct TryRuntimeCmd { value_name = "STRATEGY", possible_values = &ExecutionStrategy::variants(), case_insensitive = true, - default_value = "Native", + default_value = "Wasm", )] pub execution: ExecutionStrategy, @@ -53,24 +91,38 @@ pub struct TryRuntimeCmd { value_name = "METHOD", possible_values = &WasmExecutionMethod::variants(), case_insensitive = true, - default_value = "Interpreted" + default_value = "Compiled" )] pub wasm_method: WasmExecutionMethod, - /// The state to use to run the migration. + /// The number of 64KB pages to allocate for Wasm execution. Defaults to + /// sc_service::Configuration.default_heap_pages. + #[structopt(long)] + pub heap_pages: Option, +} + +/// Various commands to try out against runtime state at a specific block. +#[derive(Debug, Clone, structopt::StructOpt)] +pub struct TryRuntimeCmd { + #[structopt(flatten)] + pub shared: SharedParams, + #[structopt(subcommand)] - pub state: State, + pub command: Command, } -/// The state to use for a migration dry-run. -#[derive(Debug, structopt::StructOpt)] +/// The source of runtime state to try operations against. +#[derive(Debug, Clone, structopt::StructOpt)] pub enum State { - /// Use a state snapshot as state to run the migration. + /// Use a state snapshot as the source of runtime state. NOTE: for the offchain-worker command this + /// is only partially supported at the moment and you must have a relevant archive node exposed on + /// localhost:9944 in order to query the block header. 
+ // TODO https://github.com/paritytech/substrate/issues/9027 Snap { snapshot_path: PathBuf, }, - /// Use a live chain to run the migration. + /// Use a live chain as the source of runtime state. Live { /// An optional state snapshot file to WRITE to. Not written if set to `None`. #[structopt(short, long)] @@ -78,7 +130,7 @@ pub enum State { /// The block hash at which to connect. /// Will be latest finalized head if not provided. - #[structopt(short, long, multiple = false, parse(try_from_str = parse_hash))] + #[structopt(short, long, multiple = false, parse(try_from_str = parse::hash))] block_at: Option, /// The modules to scrape. If empty, entire chain state will be scraped. @@ -86,136 +138,243 @@ pub enum State { modules: Option>, /// The url to connect to. - #[structopt(default_value = "ws://localhost:9944", parse(try_from_str = parse_url))] + #[structopt(default_value = "ws://localhost:9944", parse(try_from_str = parse::url))] url: String, }, } -fn parse_hash(block_number: &str) -> Result { - let block_number = if block_number.starts_with("0x") { - &block_number[2..] 
+async fn on_runtime_upgrade( + shared: SharedParams, + command: OnRuntimeUpgradeCmd, + config: Configuration +) -> sc_cli::Result<()> +where + Block: BlockT, + Block::Hash: FromStr, + ::Err: Debug, + NumberFor: FromStr, + as FromStr>::Err: Debug, + ExecDispatch: NativeExecutionDispatch + 'static, +{ + let wasm_method = shared.wasm_method; + let execution = shared.execution; + let heap_pages = if shared.heap_pages.is_some() { + shared.heap_pages } else { - block_number + config.default_heap_pages }; - if let Some(pos) = block_number.chars().position(|c| !c.is_ascii_hexdigit()) { - Err(format!( - "Expected block hash, found illegal hex character at position: {}", - 2 + pos, - )) - } else { - Ok(block_number.into()) - } + let mut changes = Default::default(); + let max_runtime_instances = config.max_runtime_instances; + let executor = NativeExecutor::::new( + wasm_method.into(), + heap_pages, + max_runtime_instances, + ); + + let ext = { + let builder = match command.state { + State::Snap { snapshot_path } => { + Builder::::new().mode(Mode::Offline(OfflineConfig { + state_snapshot: SnapshotConfig::new(snapshot_path), + })) + }, + State::Live { + url, + snapshot_path, + block_at, + modules + } => Builder::::new().mode(Mode::Online(OnlineConfig { + transport: url.to_owned().into(), + state_snapshot: snapshot_path.as_ref().map(SnapshotConfig::new), + modules: modules.to_owned().unwrap_or_default(), + at: block_at.as_ref() + .map(|b| b.parse().map_err(|e| format!("Could not parse hash: {:?}", e))).transpose()?, + ..Default::default() + })), + }; + + let (code_key, code) = extract_code(config.chain_spec)?; + builder.inject(&[(code_key, code)]).build().await? 
+ }; + + let encoded_result = StateMachine::<_, _, NumberFor, _>::new( + &ext.backend, + None, + &mut changes, + &executor, + "TryRuntime_on_runtime_upgrade", + &[], + ext.extensions, + &sp_state_machine::backend::BackendRuntimeCode::new(&ext.backend) + .runtime_code()?, + sp_core::testing::TaskExecutor::new(), + ) + .execute(execution.into()) + .map_err(|e| format!("failed to execute 'TryRuntime_on_runtime_upgrade' due to {:?}", e))?; + + let (weight, total_weight) = <(u64, u64) as Decode>::decode(&mut &*encoded_result) + .map_err(|e| format!("failed to decode output due to {:?}", e))?; + log::info!( + "TryRuntime_on_runtime_upgrade executed without errors. Consumed weight = {}, total weight = {} ({})", + weight, + total_weight, + weight as f64 / total_weight as f64 + ); + + Ok(()) } -fn parse_url(s: &str) -> Result { - if s.starts_with("ws://") || s.starts_with("wss://") { - // could use Url crate as well, but lets keep it simple for now. - Ok(s.to_string()) +async fn offchain_worker( + shared: SharedParams, + command: OffchainWorkerCmd, + config: Configuration, +)-> sc_cli::Result<()> +where + Block: BlockT, + Block::Hash: FromStr, + Block::Header: serde::de::DeserializeOwned, + ::Err: Debug, + NumberFor: FromStr, + as FromStr>::Err: Debug, + ExecDispatch: NativeExecutionDispatch + 'static, +{ + let wasm_method = shared.wasm_method; + let execution = shared.execution; + let heap_pages = if shared.heap_pages.is_some() { + shared.heap_pages } else { - Err("not a valid WS(S) url: must start with 'ws://' or 'wss://'") - } -} + config.default_heap_pages + }; -impl TryRuntimeCmd { - pub async fn run(&self, config: Configuration) -> sc_cli::Result<()> - where - B: BlockT, - B::Hash: FromStr, - ::Err: Debug, - NumberFor: FromStr, - as FromStr>::Err: Debug, - ExecDispatch: NativeExecutionDispatch + 'static, - { - let spec = config.chain_spec; - let genesis_storage = spec.build_storage()?; - - let code = StorageData( - genesis_storage - .top - .get(well_known_keys::CODE) 
- .expect("code key must exist in genesis storage; qed") - .to_vec(), - ); - let code_key = StorageKey(well_known_keys::CODE.to_vec()); - - let wasm_method = self.wasm_method; - let execution = self.execution; - - let mut changes = Default::default(); - // don't really care about these -- use the default values. - let max_runtime_instances = config.max_runtime_instances; - let heap_pages = config.default_heap_pages; - let executor = NativeExecutor::::new( - wasm_method.into(), - heap_pages, - max_runtime_instances, - ); - - let ext = { - use remote_externalities::{Builder, Mode, SnapshotConfig, OfflineConfig, OnlineConfig}; - let builder = match &self.state { - State::Snap { snapshot_path } => { - Builder::::new().mode(Mode::Offline(OfflineConfig { - state_snapshot: SnapshotConfig::new(snapshot_path), - })) - }, - State::Live { - url, - snapshot_path, - block_at, - modules - } => Builder::::new().mode(Mode::Online(OnlineConfig { + let mut changes = Default::default(); + let max_runtime_instances = config.max_runtime_instances; + let executor = NativeExecutor::::new( + wasm_method.into(), + heap_pages, + max_runtime_instances, + ); + + let (mode, url) = match command.state { + State::Live { + url, + snapshot_path, + block_at, + modules + } => { + let online_config = OnlineConfig { transport: url.to_owned().into(), state_snapshot: snapshot_path.as_ref().map(SnapshotConfig::new), modules: modules.to_owned().unwrap_or_default(), at: block_at.as_ref() .map(|b| b.parse().map_err(|e| format!("Could not parse hash: {:?}", e))).transpose()?, ..Default::default() - })), - }; + }; - // inject the code into this ext. - builder.inject(&[(code_key, code)]).build().await? - }; + (Mode::Online(online_config), url) + }, + State::Snap { snapshot_path } => { + // TODO This is a temporary hack; the url is used just to get the header. We should try + // and get the header out of state, OR use an arbitrary header if thats ok, OR allow + // the user to feed in a header via file. 
+ // https://github.com/paritytech/substrate/issues/9027 + // This assumes you have a node running on local host default + let url = "ws://127.0.0.1:9944".to_string(); + let mode = Mode::Offline(OfflineConfig { + state_snapshot: SnapshotConfig::new(snapshot_path), + }); + + (mode, url) + } + }; + let builder = Builder::::new().mode(mode); + let mut ext = if command.overwrite_code { + let (code_key, code) = extract_code(config.chain_spec)?; + builder.inject(&[(code_key, code)]).build().await? + } else { + builder.build().await? + }; + + // register externality extensions in order to provide host interface for OCW to the runtime. + let (offchain, _offchain_state) = TestOffchainExt::new(); + let (pool, _pool_state) = TestTransactionPoolExt::new(); + ext.register_extension(OffchainDbExt::new(offchain.clone())); + ext.register_extension(OffchainWorkerExt::new(offchain)); + ext.register_extension(KeystoreExt(Arc::new(KeyStore::new()))); + ext.register_extension(TransactionPoolExt::new(pool)); + + let header_hash: Block::Hash = command.header_at + .parse() + .map_err(|e| format!("Could not parse header hash: {:?}", e))?; + let header = rpc_api::get_header::(url, header_hash).await?; + + let _ = StateMachine::<_, _, NumberFor, _>::new( + &ext.backend, + None, + &mut changes, + &executor, + "OffchainWorkerApi_offchain_worker", + header.encode().as_ref(), + ext.extensions, + &sp_state_machine::backend::BackendRuntimeCode::new(&ext.backend) + .runtime_code()?, + sp_core::testing::TaskExecutor::new(), + ) + .execute(execution.into()) + .map_err(|e| format!("failed to execute 'OffchainWorkerApi_offchain_worker' due to {:?}", e))?; + + log::info!("OffchainWorkerApi_offchain_worker executed without errors."); + + Ok(()) +} - let encoded_result = StateMachine::<_, _, NumberFor, _>::new( - &ext.backend, - None, - &mut changes, - &executor, - "TryRuntime_on_runtime_upgrade", - &[], - ext.extensions, - &sp_state_machine::backend::BackendRuntimeCode::new(&ext.backend) - 
.runtime_code()?, - sp_core::testing::TaskExecutor::new(), - ) - .execute(execution.into()) - .map_err(|e| format!("failed to execute 'TryRuntime_on_runtime_upgrade' due to {:?}", e))?; - - let (weight, total_weight) = <(u64, u64) as Decode>::decode(&mut &*encoded_result) - .map_err(|e| format!("failed to decode output due to {:?}", e))?; - log::info!( - "try-runtime executed without errors. Consumed weight = {}, total weight = {} ({})", - weight, - total_weight, - weight as f64 / total_weight as f64 - ); - - Ok(()) +impl TryRuntimeCmd { + pub async fn run(&self, config: Configuration) -> sc_cli::Result<()> + where + Block: BlockT, + Block::Header: serde::de::DeserializeOwned, + Block::Hash: FromStr, + ::Err: Debug, + NumberFor: FromStr, + as FromStr>::Err: Debug, + ExecDispatch: NativeExecutionDispatch + 'static, + { + match &self.command { + Command::OnRuntimeUpgrade(ref cmd) => { + on_runtime_upgrade::(self.shared.clone(), cmd.clone(), config).await + } + Command::OffchainWorker(cmd) => { + offchain_worker::(self.shared.clone(), cmd.clone(), config).await + } + } } } impl CliConfiguration for TryRuntimeCmd { fn shared_params(&self) -> &sc_cli::SharedParams { - &self.shared_params + &self.shared.shared_params } fn chain_id(&self, _is_dev: bool) -> sc_cli::Result { - Ok(match self.shared_params.chain { + Ok(match self.shared.shared_params.chain { Some(ref chain) => chain.clone(), None => "dev".into(), }) } } + +/// Extract `:code` from the given chain spec and return as `StorageData` along with the +/// corresponding `StorageKey`. 
+fn extract_code(spec: Box) -> sc_cli::Result<(StorageKey, StorageData)> { + let genesis_storage = spec.build_storage()?; + let code = StorageData( + genesis_storage + .top + .get(well_known_keys::CODE) + .expect("code key must exist in genesis storage; qed") + .to_vec(), + ); + let code_key = StorageKey(well_known_keys::CODE.to_vec()); + + Ok((code_key, code)) +} diff --git a/utils/frame/try-runtime/cli/src/parse.rs b/utils/frame/try-runtime/cli/src/parse.rs new file mode 100644 index 0000000000000..beb9a6508fed1 --- /dev/null +++ b/utils/frame/try-runtime/cli/src/parse.rs @@ -0,0 +1,44 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Utils for parsing user input + +pub(crate) fn hash(block_hash: &str) -> Result { + let (block_hash, offset) = if block_hash.starts_with("0x") { + (&block_hash[2..], 2) + } else { + (block_hash, 0) + }; + + if let Some(pos) = block_hash.chars().position(|c| !c.is_ascii_hexdigit()) { + Err(format!( + "Expected block hash, found illegal hex character at position: {}", + offset + pos, + )) + } else { + Ok(block_hash.into()) + } +} + +pub(crate) fn url(s: &str) -> Result { + if s.starts_with("ws://") || s.starts_with("wss://") { + // could use Url crate as well, but lets keep it simple for now. 
+ Ok(s.to_string()) + } else { + Err("not a valid WS(S) url: must start with 'ws://' or 'wss://'") + } +} From 2c84b31c86a62353ad3dbf04fae971b18470c9f5 Mon Sep 17 00:00:00 2001 From: Folyd Date: Fri, 11 Jun 2021 01:31:49 +0800 Subject: [PATCH 26/61] Migrate ProfilingLayer to tracing registry API (#8943) * Migrate ProfilingLayer to tracing registry API * Remove the `current_span` field from `BlockSubscriber`. * Bump the `tracing-subscriber` version * Fix Gitlab CI --- Cargo.lock | 4 +- client/executor/Cargo.toml | 2 +- client/tracing/Cargo.toml | 4 +- client/tracing/src/block/mod.rs | 17 +--- client/tracing/src/lib.rs | 156 +++++++++++++++++------------- client/tracing/src/logging/mod.rs | 4 +- primitives/tracing/Cargo.toml | 2 +- 7 files changed, 99 insertions(+), 90 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a6c7873f6f066..dc2c67ad1883a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10519,9 +10519,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ab8966ac3ca27126141f7999361cc97dd6fb4b71da04c02044fa9045d98bb96" +checksum = "aa5553bf0883ba7c9cbe493b085c29926bd41b66afc31ff72cf17ff4fb60dcd5" dependencies = [ "ansi_term 0.12.1", "chrono", diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index f9ebfd9bd5de5..7cb2e12fd3913 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -48,7 +48,7 @@ sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } sp-tracing = { version = "3.0.0", path = "../../primitives/tracing" } sc-tracing = { version = "3.0.0", path = "../tracing" } tracing = "0.1.25" -tracing-subscriber = "0.2.15" +tracing-subscriber = "0.2.18" paste = "1.0" [features] diff --git a/client/tracing/Cargo.toml b/client/tracing/Cargo.toml index a455cd8ab95c3..1121b922494c9 100644 --- a/client/tracing/Cargo.toml +++ b/client/tracing/Cargo.toml @@ -26,8 +26,8 @@ serde = 
"1.0.101" serde_json = "1.0.41" thiserror = "1.0.21" tracing = "0.1.25" -tracing-log = "0.1.1" -tracing-subscriber = "0.2.15" +tracing-log = "0.1.2" +tracing-subscriber = "0.2.18" sp-tracing = { version = "3.0.0", path = "../../primitives/tracing" } sp-rpc = { version = "3.0.0", path = "../../primitives/rpc" } sp-block-builder = { version = "3.0.0", path = "../../primitives/block-builder" } diff --git a/client/tracing/src/block/mod.rs b/client/tracing/src/block/mod.rs index 70e74b1d82788..bc5342c859980 100644 --- a/client/tracing/src/block/mod.rs +++ b/client/tracing/src/block/mod.rs @@ -20,7 +20,6 @@ use std::{collections::HashMap, sync::{Arc, atomic::{AtomicU64, Ordering}}, time use parking_lot::Mutex; use tracing::{Dispatch, dispatcher, Subscriber, Level, span::{Attributes, Record, Id}}; -use tracing_subscriber::CurrentSpan; use sc_client_api::BlockBackend; use sc_rpc_server::MAX_PAYLOAD; @@ -75,7 +74,6 @@ pub enum Error { struct BlockSubscriber { targets: Vec<(String, Level)>, next_id: AtomicU64, - current_span: CurrentSpan, spans: Mutex>, events: Mutex>, } @@ -93,7 +91,6 @@ impl BlockSubscriber { BlockSubscriber { targets, next_id, - current_span: CurrentSpan::default(), spans: Mutex::new(HashMap::new()), events: Mutex::new(Vec::new()), } @@ -117,8 +114,7 @@ impl Subscriber for BlockSubscriber { let id = Id::from_u64(self.next_id.fetch_add(1, Ordering::Relaxed)); let mut values = Values::default(); attrs.record(&mut values); - let parent_id = attrs.parent().cloned() - .or_else(|| self.current_span.id()); + let parent_id = attrs.parent().cloned(); let span = SpanDatum { id: id.clone(), parent_id, @@ -150,8 +146,7 @@ impl Subscriber for BlockSubscriber { fn event(&self, event: &tracing::Event<'_>) { let mut values = crate::Values::default(); event.record(&mut values); - let parent_id = event.parent().cloned() - .or_else(|| self.current_span.id()); + let parent_id = event.parent().cloned(); let trace_event = TraceEvent { name: event.metadata().name().to_owned(), 
target: event.metadata().target().to_owned(), @@ -162,14 +157,10 @@ impl Subscriber for BlockSubscriber { self.events.lock().push(trace_event); } - fn enter(&self, id: &Id) { - self.current_span.enter(id.clone()); + fn enter(&self, _id: &Id) { } - fn exit(&self, span: &Id) { - if self.spans.lock().contains_key(span) { - self.current_span.exit(); - } + fn exit(&self, _span: &Id) { } } diff --git a/client/tracing/src/lib.rs b/client/tracing/src/lib.rs index 72992a9ab05fa..9f02bb96e4f77 100644 --- a/client/tracing/src/lib.rs +++ b/client/tracing/src/lib.rs @@ -28,26 +28,23 @@ #![warn(missing_docs)] -pub mod logging; pub mod block; +pub mod logging; use rustc_hash::FxHashMap; +use serde::ser::{Serialize, SerializeMap, Serializer}; +use sp_tracing::{WASM_NAME_KEY, WASM_TARGET_KEY, WASM_TRACE_IDENTIFIER}; use std::fmt; use std::time::{Duration, Instant}; -use parking_lot::Mutex; -use serde::ser::{Serialize, Serializer, SerializeMap}; use tracing::{ event::Event, - field::{Visit, Field}, - Level, + field::{Field, Visit}, span::{Attributes, Id, Record}, subscriber::Subscriber, + Level, }; -use tracing_subscriber::{ - CurrentSpan, - layer::{Layer, Context}, -}; -use sp_tracing::{WASM_NAME_KEY, WASM_TARGET_KEY, WASM_TRACE_IDENTIFIER}; +use tracing_subscriber::layer::{Context, Layer}; +use tracing_subscriber::registry::LookupSpan; #[doc(hidden)] pub use tracing; @@ -58,8 +55,6 @@ const ZERO_DURATION: Duration = Duration::from_nanos(0); pub struct ProfilingLayer { targets: Vec<(String, Level)>, trace_handler: Box, - span_data: Mutex>, - current_span: CurrentSpan, } /// Used to configure how to receive the metrics @@ -142,10 +137,10 @@ impl Values { /// Checks if all individual collections are empty pub fn is_empty(&self) -> bool { - self.bool_values.is_empty() && - self.i64_values.is_empty() && - self.u64_values.is_empty() && - self.string_values.is_empty() + self.bool_values.is_empty() + && self.i64_values.is_empty() + && self.u64_values.is_empty() + && 
self.string_values.is_empty() } } @@ -225,8 +220,6 @@ impl ProfilingLayer { Self { targets, trace_handler, - span_data: Mutex::new(FxHashMap::default()), - current_span: Default::default(), } } @@ -257,32 +250,56 @@ fn parse_target(s: &str) -> (String, Level) { } } -impl Layer for ProfilingLayer { - fn new_span(&self, attrs: &Attributes<'_>, id: &Id, _ctx: Context) { - let mut values = Values::default(); - attrs.record(&mut values); - let span_datum = SpanDatum { - id: id.clone(), - parent_id: attrs.parent().cloned().or_else(|| self.current_span.id()), - name: attrs.metadata().name().to_owned(), - target: attrs.metadata().target().to_owned(), - level: *attrs.metadata().level(), - line: attrs.metadata().line().unwrap_or(0), - start_time: Instant::now(), - overall_time: ZERO_DURATION, - values, - }; - self.span_data.lock().insert(id.clone(), span_datum); +impl Layer for ProfilingLayer +where + S: Subscriber + for<'span> LookupSpan<'span>, +{ + fn new_span(&self, attrs: &Attributes<'_>, id: &Id, ctx: Context) { + if let Some(span) = ctx.span(id) { + let mut extension = span.extensions_mut(); + let parent_id = attrs.parent().cloned().or_else(|| { + if attrs.is_contextual() { + ctx.lookup_current().map(|span| span.id()) + } else { + None + } + }); + + let mut values = Values::default(); + attrs.record(&mut values); + let span_datum = SpanDatum { + id: id.clone(), + parent_id, + name: attrs.metadata().name().to_owned(), + target: attrs.metadata().target().to_owned(), + level: *attrs.metadata().level(), + line: attrs.metadata().line().unwrap_or(0), + start_time: Instant::now(), + overall_time: ZERO_DURATION, + values, + }; + extension.insert(span_datum); + } } - fn on_record(&self, span: &Id, values: &Record<'_>, _ctx: Context) { - let mut span_data = self.span_data.lock(); - if let Some(s) = span_data.get_mut(span) { - values.record(&mut s.values); + fn on_record(&self, id: &Id, values: &Record<'_>, ctx: Context) { + if let Some(span) = ctx.span(id) { + let mut 
extensions = span.extensions_mut(); + if let Some(s) = extensions.get_mut::() { + values.record(&mut s.values); + } } } - fn on_event(&self, event: &Event<'_>, _ctx: Context) { + fn on_event(&self, event: &Event<'_>, ctx: Context) { + let parent_id = event.parent().cloned().or_else(|| { + if event.is_contextual() { + ctx.lookup_current().map(|span| span.id()) + } else { + None + } + }); + let mut values = Values::default(); event.record(&mut values); let trace_event = TraceEvent { @@ -290,46 +307,46 @@ impl Layer for ProfilingLayer { target: event.metadata().target().to_owned(), level: *event.metadata().level(), values, - parent_id: event.parent().cloned().or_else(|| self.current_span.id()), + parent_id, }; self.trace_handler.handle_event(trace_event); } - fn on_enter(&self, span: &Id, _ctx: Context) { - self.current_span.enter(span.clone()); - let mut span_data = self.span_data.lock(); - let start_time = Instant::now(); - if let Some(mut s) = span_data.get_mut(&span) { - s.start_time = start_time; + fn on_enter(&self, span: &Id, ctx: Context) { + if let Some(span) = ctx.span(span) { + let mut extensions = span.extensions_mut(); + if let Some(s) = extensions.get_mut::() { + let start_time = Instant::now(); + s.start_time = start_time; + } } } - fn on_exit(&self, span: &Id, _ctx: Context) { - let end_time = Instant::now(); - let span_datum = { - let mut span_data = self.span_data.lock(); - span_data.remove(&span) - }; - - if let Some(mut span_datum) = span_datum { - // If `span_datum` is `None` we don't exit (we'd be exiting the parent span) - self.current_span.exit(); - span_datum.overall_time += end_time - span_datum.start_time; - if span_datum.name == WASM_TRACE_IDENTIFIER { - span_datum.values.bool_values.insert("wasm".to_owned(), true); - if let Some(n) = span_datum.values.string_values.remove(WASM_NAME_KEY) { - span_datum.name = n; - } - if let Some(t) = span_datum.values.string_values.remove(WASM_TARGET_KEY) { - span_datum.target = t; - } - if 
self.check_target(&span_datum.target, &span_datum.level) { + fn on_exit(&self, span: &Id, ctx: Context) { + if let Some(span) = ctx.span(span) { + let end_time = Instant::now(); + let mut extensions = span.extensions_mut(); + if let Some(mut span_datum) = extensions.remove::() { + span_datum.overall_time += end_time - span_datum.start_time; + if span_datum.name == WASM_TRACE_IDENTIFIER { + span_datum + .values + .bool_values + .insert("wasm".to_owned(), true); + if let Some(n) = span_datum.values.string_values.remove(WASM_NAME_KEY) { + span_datum.name = n; + } + if let Some(t) = span_datum.values.string_values.remove(WASM_TARGET_KEY) { + span_datum.target = t; + } + if self.check_target(&span_datum.target, &span_datum.level) { + self.trace_handler.handle_span(span_datum); + } + } else { self.trace_handler.handle_span(span_datum); } - } else { - self.trace_handler.handle_span(span_datum); } - }; + } } fn on_close(&self, _span: Id, _ctx: Context) {} @@ -414,6 +431,7 @@ impl From for sp_rpc::tracing::Span { #[cfg(test)] mod tests { use super::*; + use parking_lot::Mutex; use std::sync::Arc; use tracing_subscriber::layer::SubscriberExt; diff --git a/client/tracing/src/logging/mod.rs b/client/tracing/src/logging/mod.rs index 63daa0b29ce18..a3fa3a531b3e4 100644 --- a/client/tracing/src/logging/mod.rs +++ b/client/tracing/src/logging/mod.rs @@ -398,7 +398,7 @@ mod tests { #[test] fn prefix_in_log_lines() { let re = regex::Regex::new(&format!( - r"^\d{{4}}-\d{{2}}-\d{{2}} \d{{2}}:\d{{2}}:\d{{2}} \[{}\] {}$", + r"^\d{{4}}-\d{{2}}-\d{{2}} \d{{2}}:\d{{2}}:\d{{2}} \[{}\] {}$", EXPECTED_NODE_NAME, EXPECTED_LOG_MESSAGE, )) .unwrap(); @@ -448,7 +448,7 @@ mod tests { #[test] fn do_not_write_with_colors_on_tty() { let re = regex::Regex::new(&format!( - r"^\d{{4}}-\d{{2}}-\d{{2}} \d{{2}}:\d{{2}}:\d{{2}} {}$", + r"^\d{{4}}-\d{{2}}-\d{{2}} \d{{2}}:\d{{2}}:\d{{2}} {}$", EXPECTED_LOG_MESSAGE, )) .unwrap(); diff --git a/primitives/tracing/Cargo.toml b/primitives/tracing/Cargo.toml index 
6c4d70b109cd7..2c4b7dc12c744 100644 --- a/primitives/tracing/Cargo.toml +++ b/primitives/tracing/Cargo.toml @@ -23,7 +23,7 @@ codec = { version = "2.0.0", package = "parity-scale-codec", default-features = tracing = { version = "0.1.25", default-features = false } tracing-core = { version = "0.1.17", default-features = false } log = { version = "0.4.8", optional = true } -tracing-subscriber = { version = "0.2.15", optional = true, features = ["tracing-log"] } +tracing-subscriber = { version = "0.2.18", optional = true, features = ["tracing-log"] } parking_lot = { version = "0.10.0", optional = true } erased-serde = { version = "0.3.9", optional = true } serde = { version = "1.0.101", optional = true } From 8f99b4bb177b8cf57b624a10481b6a1e2ea72809 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Fri, 11 Jun 2021 12:18:41 +0100 Subject: [PATCH 27/61] grandpa: ignore justifications from other consensus engines (#9075) --- client/finality-grandpa/src/import.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index 474f6ee5bf7e5..3d22cc8866100 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -646,10 +646,12 @@ where initial_sync: bool, ) -> Result<(), ConsensusError> { if justification.0 != GRANDPA_ENGINE_ID { - return Err(ConsensusError::ClientImport(format!( - "Expected GRANDPA Justification, got {}.", - String::from_utf8_lossy(&justification.0) - ))); + // TODO: the import queue needs to be refactored to be able dispatch to the correct + // `JustificationImport` instance based on `ConsensusEngineId`, or we need to build a + // justification import pipeline similar to what we do for `BlockImport`. In the + // meantime we'll just drop the justification, since this is only used for BEEFY which + // is still WIP. 
+ return Ok(()); } let justification = GrandpaJustification::decode_and_verify_finalizes( From 218bd4ffb20565e29a8fcca2f8f3ed005fc7d6cc Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 11 Jun 2021 14:36:37 +0200 Subject: [PATCH 28/61] Don't connect to reserved nodes if they're banned (#9020) --- client/peerset/src/lib.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/client/peerset/src/lib.rs b/client/peerset/src/lib.rs index 19260afccb802..36d1e1831cec6 100644 --- a/client/peerset/src/lib.rs +++ b/client/peerset/src/lib.rs @@ -444,6 +444,8 @@ impl Peerset { set_id: SetId(set_index), peer_id: peer.into_peer_id(), }); + + self.alloc_slots(SetId(set_index)); } } } @@ -524,6 +526,14 @@ impl Peerset { peersstate::Peer::Connected(_) => continue, }; + // Don't connect to nodes with an abysmal reputation, even if they're reserved. + // This is a rather opinionated behaviour, and it wouldn't be fundamentally wrong to + // remove that check. If necessary, the peerset should be refactored to give more + // control over what happens in that situation. 
+ if entry.reputation() < BANNED_THRESHOLD { + break; + } + match entry.try_outgoing() { Ok(conn) => self.message_queue.push_back(Message::Connect { set_id, From 155ac5bcadc3143c60ffded6a3af47bce3fb12e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 11 Jun 2021 16:12:57 +0100 Subject: [PATCH 29/61] Try fix ui tests (#9082) * Try fix ui tests * More --- frame/support/test/Cargo.toml | 2 +- frame/support/test/tests/derive_no_bound_ui/eq.stderr | 2 +- primitives/api/proc-macro/Cargo.toml | 1 - primitives/api/test/Cargo.toml | 2 +- .../test/tests/ui/impl_incorrect_method_signature.stderr | 4 ++-- .../api/test/tests/ui/mock_only_self_reference.stderr | 8 ++++---- .../ui/type_reference_in_impl_runtime_apis_call.stderr | 4 ++-- primitives/npos-elections/compact/Cargo.toml | 2 +- primitives/runtime-interface/Cargo.toml | 2 +- test-utils/Cargo.toml | 2 +- 10 files changed, 14 insertions(+), 15 deletions(-) diff --git a/frame/support/test/Cargo.toml b/frame/support/test/Cargo.toml index 85236a20f60e5..1a979cdee6f8e 100644 --- a/frame/support/test/Cargo.toml +++ b/frame/support/test/Cargo.toml @@ -20,7 +20,7 @@ frame-support = { version = "3.0.0", default-features = false, path = "../" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../../primitives/runtime" } sp-core = { version = "3.0.0", default-features = false, path = "../../../primitives/core" } sp-std = { version = "3.0.0", default-features = false, path = "../../../primitives/std" } -trybuild = "1.0.38" +trybuild = "1.0.42" pretty_assertions = "0.6.1" rustversion = "1.0.0" frame-metadata = { version = "13.0.0", default-features = false, path = "../../metadata" } diff --git a/frame/support/test/tests/derive_no_bound_ui/eq.stderr b/frame/support/test/tests/derive_no_bound_ui/eq.stderr index 36384178d469b..fce13d6f17f06 100644 --- a/frame/support/test/tests/derive_no_bound_ui/eq.stderr +++ b/frame/support/test/tests/derive_no_bound_ui/eq.stderr @@ -7,6 +7,6 @@ 
error[E0277]: can't compare `Foo` with `Foo` ::: $RUST/core/src/cmp.rs | | pub trait Eq: PartialEq { - | --------------- required by this bound in `Eq` + | --------------- required by this bound in `std::cmp::Eq` | = help: the trait `PartialEq` is not implemented for `Foo` diff --git a/primitives/api/proc-macro/Cargo.toml b/primitives/api/proc-macro/Cargo.toml index 1df8c489e9148..d07285fe215a9 100644 --- a/primitives/api/proc-macro/Cargo.toml +++ b/primitives/api/proc-macro/Cargo.toml @@ -12,7 +12,6 @@ documentation = "https://docs.rs/sp-api-proc-macro" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] - [lib] proc-macro = true diff --git a/primitives/api/test/Cargo.toml b/primitives/api/test/Cargo.toml index 2a6325fd09e9a..5866d44bd479b 100644 --- a/primitives/api/test/Cargo.toml +++ b/primitives/api/test/Cargo.toml @@ -22,7 +22,7 @@ sp-consensus = { version = "0.9.0", path = "../../consensus/common" } sc-block-builder = { version = "0.9.0", path = "../../../client/block-builder" } codec = { package = "parity-scale-codec", version = "2.0.0" } sp-state-machine = { version = "0.9.0", path = "../../state-machine" } -trybuild = "1.0.38" +trybuild = "1.0.42" rustversion = "1.0.0" [dev-dependencies] diff --git a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr index fcda69533e3ad..6b00b7268672f 100644 --- a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr +++ b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr @@ -23,8 +23,8 @@ error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for tr 17 | sp_api::impl_runtime_apis! 
{ | ^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `u64`, found struct `std::string::String` | - = note: expected fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> std::result::Result<_, _>` - found fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> std::result::Result<_, _>` + = note: expected fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> Result<_, _>` + found fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> Result<_, _>` = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) error[E0308]: mismatched types diff --git a/primitives/api/test/tests/ui/mock_only_self_reference.stderr b/primitives/api/test/tests/ui/mock_only_self_reference.stderr index 73cf936103798..83cfcf6ca1f9e 100644 --- a/primitives/api/test/tests/ui/mock_only_self_reference.stderr +++ b/primitives/api/test/tests/ui/mock_only_self_reference.stderr @@ -24,8 +24,8 @@ error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for tr 12 | sp_api::mock_impl_runtime_apis! 
{ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `u64`, found `()` | - = note: expected fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option, Vec<_>) -> std::result::Result<_, _>` - found fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option<()>, Vec<_>) -> std::result::Result<_, _>` + = note: expected fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option, Vec<_>) -> Result<_, _>` + found fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option<()>, Vec<_>) -> Result<_, _>` = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) error[E0053]: method `Api_test2_runtime_api_impl` has an incompatible type for trait @@ -42,6 +42,6 @@ error[E0053]: method `Api_test2_runtime_api_impl` has an incompatible type for t 12 | sp_api::mock_impl_runtime_apis! { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `u64`, found `()` | - = note: expected fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option, Vec<_>) -> std::result::Result<_, _>` - found fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option<()>, Vec<_>) -> std::result::Result<_, _>` + = note: expected fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option, Vec<_>) -> Result<_, _>` + found fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option<()>, Vec<_>) -> Result<_, _>` = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr index 71f12b415a2b5..689723f8d7509 100644 --- a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr +++ b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr @@ -23,8 +23,8 @@ error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for 
tr 17 | sp_api::impl_runtime_apis! { | ^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `u64`, found `&u64` | - = note: expected fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> std::result::Result<_, _>` - found fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option<&u64>, Vec<_>) -> std::result::Result<_, _>` + = note: expected fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> Result<_, _>` + found fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option<&u64>, Vec<_>) -> Result<_, _>` = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) error[E0308]: mismatched types diff --git a/primitives/npos-elections/compact/Cargo.toml b/primitives/npos-elections/compact/Cargo.toml index 63432a36efc80..78432d777a019 100644 --- a/primitives/npos-elections/compact/Cargo.toml +++ b/primitives/npos-elections/compact/Cargo.toml @@ -24,4 +24,4 @@ proc-macro-crate = "1.0.0" parity-scale-codec = "2.0.1" sp-arithmetic = { path = "../../arithmetic" } sp-npos-elections = { path = ".." 
} -trybuild = "1.0.41" +trybuild = "1.0.42" diff --git a/primitives/runtime-interface/Cargo.toml b/primitives/runtime-interface/Cargo.toml index c4eb084f685c4..4099e89933880 100644 --- a/primitives/runtime-interface/Cargo.toml +++ b/primitives/runtime-interface/Cargo.toml @@ -31,7 +31,7 @@ sp-state-machine = { version = "0.9.0", path = "../state-machine" } sp-core = { version = "3.0.0", path = "../core" } sp-io = { version = "3.0.0", path = "../io" } rustversion = "1.0.0" -trybuild = "1.0.38" +trybuild = "1.0.42" [features] default = [ "std" ] diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index 0a8849fe98a72..24a794ff48025 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -18,4 +18,4 @@ tokio = { version = "0.2.13", features = ["macros"] } [dev-dependencies] sc-service = { version = "0.9.0", path = "../client/service" } -trybuild = { version = "1.0.38", features = [ "diff" ] } +trybuild = { version = "1.0.42", features = [ "diff" ] } From 41ab01a8cb2a43f7d743778c066ad91453e0c883 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Fri, 11 Jun 2021 16:45:13 +0100 Subject: [PATCH 30/61] Implement `transfer_all` in Balances Pallet (#9018) * transfer_all * benchmark * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_balances --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/balances/src/weights.rs --template=./.maintain/frame-weight-template.hbs * update * add note * typo Co-authored-by: Parity Bot Co-authored-by: Alexander Popiak --- frame/balances/src/benchmarking.rs | 24 +++++++++++++++--- frame/balances/src/lib.rs | 35 +++++++++++++++++++++++++- frame/balances/src/tests.rs | 40 ++++++++++++++++++++++++++++++ frame/balances/src/weights.rs | 33 ++++++++++++++++-------- 4 files changed, 117 insertions(+), 15 deletions(-) diff --git a/frame/balances/src/benchmarking.rs 
b/frame/balances/src/benchmarking.rs index f89775146b136..688bcbc262bdb 100644 --- a/frame/balances/src/benchmarking.rs +++ b/frame/balances/src/benchmarking.rs @@ -40,7 +40,7 @@ benchmarks_instance_pallet! { let existential_deposit = T::ExistentialDeposit::get(); let caller = whitelisted_caller(); - // Give some multiple of the existential deposit + creation fee + transfer fee + // Give some multiple of the existential deposit let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); let _ = as Currency<_>>::make_free_balance_be(&caller, balance); @@ -130,7 +130,7 @@ benchmarks_instance_pallet! { let source: T::AccountId = account("source", 0, SEED); let source_lookup: ::Source = T::Lookup::unlookup(source.clone()); - // Give some multiple of the existential deposit + creation fee + transfer fee + // Give some multiple of the existential deposit let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); let _ = as Currency<_>>::make_free_balance_be(&source, balance); @@ -154,7 +154,7 @@ benchmarks_instance_pallet! { let existential_deposit = T::ExistentialDeposit::get(); let caller = whitelisted_caller(); - // Give some multiple of the existential deposit + creation fee + transfer fee + // Give some multiple of the existential deposit let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); let _ = as Currency<_>>::make_free_balance_be(&caller, balance); @@ -176,6 +176,24 @@ benchmarks_instance_pallet! 
{ assert_eq!(Balances::::free_balance(&caller), Zero::zero()); assert_eq!(Balances::::free_balance(&recipient), transfer_amount); } + + // Benchmark `transfer_all` with the worst possible condition: + // * The recipient account is created + // * The sender is killed + transfer_all { + let caller = whitelisted_caller(); + let recipient: T::AccountId = account("recipient", 0, SEED); + let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); + + // Give some multiple of the existential deposit + let existential_deposit = T::ExistentialDeposit::get(); + let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); + let _ = as Currency<_>>::make_free_balance_be(&caller, balance); + }: _(RawOrigin::Signed(caller.clone()), recipient_lookup, false) + verify { + assert!(Balances::::free_balance(&caller).is_zero()); + assert_eq!(Balances::::free_balance(&recipient), balance); + } } impl_benchmark_test_suite!( diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index 23c5cc97d0937..105c5d08a659c 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -369,6 +369,39 @@ pub mod pallet { >::transfer(&transactor, &dest, value, KeepAlive)?; Ok(().into()) } + + /// Transfer the entire transferable balance from the caller account. + /// + /// NOTE: This function only attempts to transfer _transferable_ balances. This means that + /// any locked, reserved, or existential deposits (when `keep_alive` is `true`), will not be + /// transferred by this function. To ensure that this function results in a killed account, + /// you might need to prepare the account by removing any reference counters, storage + /// deposits, etc... + /// + /// The dispatch origin of this call must be Signed. + /// + /// - `dest`: The recipient of the transfer. 
+ /// - `keep_alive`: A boolean to determine if the `transfer_all` operation should send all + /// of the funds the account has, causing the sender account to be killed (false), or + /// transfer everything except at least the existential deposit, which will guarantee to + /// keep the sender account alive (true). + /// # + /// - O(1). Just like transfer, but reading the user's transferable balance first. + /// # + #[pallet::weight(T::WeightInfo::transfer_all())] + pub fn transfer_all( + origin: OriginFor, + dest: ::Source, + keep_alive: bool, + ) -> DispatchResultWithPostInfo { + use fungible::Inspect; + let transactor = ensure_signed(origin)?; + let reducible_balance = Self::reducible_balance(&transactor, keep_alive); + let dest = T::Lookup::lookup(dest)?; + let keep_alive = if keep_alive { KeepAlive } else { AllowDeath }; + >::transfer(&transactor, &dest, reducible_balance, keep_alive.into())?; + Ok(().into()) + } } #[pallet::event] @@ -1696,7 +1729,7 @@ impl, I: 'static> NamedReservableCurrency for Pallet< /// Is a no-op if value to be reserved is zero. fn reserve_named(id: &Self::ReserveIdentifier, who: &T::AccountId, value: Self::Balance) -> DispatchResult { if value.is_zero() { return Ok(()) } - + Reserves::::try_mutate(who, |reserves| -> DispatchResult { match reserves.binary_search_by_key(id, |data| data.id) { Ok(index) => { diff --git a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs index 86004efcf68f6..3598595c7649c 100644 --- a/frame/balances/src/tests.rs +++ b/frame/balances/src/tests.rs @@ -965,6 +965,46 @@ macro_rules! 
decl_tests { }); } + #[test] + fn transfer_all_works() { + <$ext_builder>::default() + .existential_deposit(100) + .build() + .execute_with(|| { + // setup + assert_ok!(Balances::set_balance(Origin::root(), 1, 200, 0)); + assert_ok!(Balances::set_balance(Origin::root(), 2, 0, 0)); + // transfer all and allow death + assert_ok!(Balances::transfer_all(Some(1).into(), 2, false)); + assert_eq!(Balances::total_balance(&1), 0); + assert_eq!(Balances::total_balance(&2), 200); + + // setup + assert_ok!(Balances::set_balance(Origin::root(), 1, 200, 0)); + assert_ok!(Balances::set_balance(Origin::root(), 2, 0, 0)); + // transfer all and keep alive + assert_ok!(Balances::transfer_all(Some(1).into(), 2, true)); + assert_eq!(Balances::total_balance(&1), 100); + assert_eq!(Balances::total_balance(&2), 100); + + // setup + assert_ok!(Balances::set_balance(Origin::root(), 1, 200, 10)); + assert_ok!(Balances::set_balance(Origin::root(), 2, 0, 0)); + // transfer all and allow death w/ reserved + assert_ok!(Balances::transfer_all(Some(1).into(), 2, false)); + assert_eq!(Balances::total_balance(&1), 0); + assert_eq!(Balances::total_balance(&2), 200); + + // setup + assert_ok!(Balances::set_balance(Origin::root(), 1, 200, 10)); + assert_ok!(Balances::set_balance(Origin::root(), 2, 0, 0)); + // transfer all and keep alive w/ reserved + assert_ok!(Balances::transfer_all(Some(1).into(), 2, true)); + assert_eq!(Balances::total_balance(&1), 100); + assert_eq!(Balances::total_balance(&2), 110); + }); + } + #[test] fn named_reserve_should_work() { <$ext_builder>::default().build().execute_with(|| { diff --git a/frame/balances/src/weights.rs b/frame/balances/src/weights.rs index 5f3cf2b6bd9a9..cf1d7dff82848 100644 --- a/frame/balances/src/weights.rs +++ b/frame/balances/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_balances //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! 
DATE: 2021-04-08, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-06-04, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -49,63 +49,74 @@ pub trait WeightInfo { fn set_balance_creating() -> Weight; fn set_balance_killing() -> Weight; fn force_transfer() -> Weight; + fn transfer_all() -> Weight; } /// Weights for pallet_balances using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn transfer() -> Weight { - (81_909_000 as Weight) + (91_896_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn transfer_keep_alive() -> Weight { - (61_075_000 as Weight) + (67_779_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_balance_creating() -> Weight { - (32_255_000 as Weight) + (36_912_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_balance_killing() -> Weight { - (38_513_000 as Weight) + (44_416_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_transfer() -> Weight { - (80_448_000 as Weight) + (90_811_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } + fn transfer_all() -> Weight { + (84_170_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } } // For backwards compatibility and tests impl WeightInfo for () { fn transfer() -> Weight { - (81_909_000 as Weight) + (91_896_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) 
.saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn transfer_keep_alive() -> Weight { - (61_075_000 as Weight) + (67_779_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn set_balance_creating() -> Weight { - (32_255_000 as Weight) + (36_912_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn set_balance_killing() -> Weight { - (38_513_000 as Weight) + (44_416_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_transfer() -> Weight { - (80_448_000 as Weight) + (90_811_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } + fn transfer_all() -> Weight { + (84_170_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } } From b9f7b588c8ba35ede3bfcce955ddc4712377245e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 11 Jun 2021 18:24:30 +0100 Subject: [PATCH 31/61] Transaction pool: Remove futures-diagnose and thread pool (#9074) * Transaction pool: Remove futures-diagnose and thread pool This pr removes `futures-diagnose` as this isn't used anymore. Besides that the pr also removes the thread pool that was used to validate the transactions in the background. Instead of this thread pool we now spawn two separate long running tasks that we use to validate the transactions. All tasks of the transaction pool are now also spawned as essential tasks. This means, if any of these tasks is stopping, the node will stop as well. 
* Update client/transaction-pool/src/api.rs --- Cargo.lock | 17 ----- bin/node-template/node/src/service.rs | 4 +- bin/node/cli/src/service.rs | 4 +- client/transaction-pool/Cargo.toml | 1 - client/transaction-pool/src/api.rs | 71 ++++++++++++++------- client/transaction-pool/src/lib.rs | 22 ++++--- client/transaction-pool/src/testing/pool.rs | 12 +++- test-utils/test-runner/src/node.rs | 2 +- 8 files changed, 78 insertions(+), 55 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dc2c67ad1883a..4572ed354ab14 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2067,22 +2067,6 @@ dependencies = [ "num_cpus", ] -[[package]] -name = "futures-diagnose" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdcef58a173af8148b182684c9f2d5250875adbcaff7b5794073894f9d8634a9" -dependencies = [ - "futures 0.1.31", - "futures 0.3.15", - "lazy_static", - "log", - "parking_lot 0.9.0", - "pin-project 0.4.27", - "serde", - "serde_json", -] - [[package]] name = "futures-executor" version = "0.3.15" @@ -8215,7 +8199,6 @@ version = "3.0.0" dependencies = [ "assert_matches", "futures 0.3.15", - "futures-diagnose", "hex", "intervalier", "log", diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 8ed9c1ee50378..51b63e614fb8a 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -68,7 +68,7 @@ pub fn new_partial(config: &Configuration) -> Result Result let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( config.transaction_pool.clone(), config.prometheus_registry(), - task_manager.spawn_handle(), + task_manager.spawn_essential_handle(), client.clone(), on_demand.clone(), )); diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index a9ac2ac8065f9..06e1fcc804773 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -90,7 +90,7 @@ pub fn new_partial( config.transaction_pool.clone(), 
config.role.is_authority().into(), config.prometheus_registry(), - task_manager.spawn_handle(), + task_manager.spawn_essential_handle(), client.clone(), ); @@ -471,7 +471,7 @@ pub fn new_light_base( let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( config.transaction_pool.clone(), config.prometheus_registry(), - task_manager.spawn_handle(), + task_manager.spawn_essential_handle(), client.clone(), on_demand.clone(), )); diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml index d457d709d1222..6b105520baec5 100644 --- a/client/transaction-pool/Cargo.toml +++ b/client/transaction-pool/Cargo.toml @@ -16,7 +16,6 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "2.0.0" } thiserror = "1.0.21" futures = { version = "0.3.1", features = ["compat"] } -futures-diagnose = "1.0" intervalier = "0.4.0" log = "0.4.8" parity-util-mem = { version = "0.9.0", default-features = false, features = ["primitive-types"] } diff --git a/client/transaction-pool/src/api.rs b/client/transaction-pool/src/api.rs index 09864f78248a3..74e08c3aa0589 100644 --- a/client/transaction-pool/src/api.rs +++ b/client/transaction-pool/src/api.rs @@ -21,7 +21,8 @@ use std::{marker::PhantomData, pin::Pin, sync::Arc}; use codec::{Decode, Encode}; use futures::{ - channel::oneshot, executor::{ThreadPool, ThreadPoolBuilder}, future::{Future, FutureExt, ready, Ready}, + channel::{oneshot, mpsc}, future::{Future, FutureExt, ready, Ready}, lock::Mutex, SinkExt, + StreamExt, }; use sc_client_api::{ @@ -34,15 +35,36 @@ use sp_runtime::{ use sp_transaction_pool::runtime_api::TaggedTransactionQueue; use sp_api::{ProvideRuntimeApi, ApiExt}; use prometheus_endpoint::Registry as PrometheusRegistry; +use sp_core::traits::SpawnEssentialNamed; use crate::{metrics::{ApiMetrics, ApiMetricsExt}, error::{self, Error}}; /// The transaction pool logic for full client. 
pub struct FullChainApi { client: Arc, - pool: ThreadPool, _marker: PhantomData, metrics: Option>, + validation_pool: Arc + Send>>>>>, +} + +/// Spawn a validation task that will be used by the transaction pool to validate transactions. +fn spawn_validation_pool_task( + name: &'static str, + receiver: Arc + Send>>>>>, + spawner: &impl SpawnEssentialNamed, +) { + spawner.spawn_essential_blocking( + name, + async move { + loop { + let task = receiver.lock().await.next().await; + match task { + None => return, + Some(task) => task.await, + } + } + }.boxed(), + ); } impl FullChainApi { @@ -50,6 +72,7 @@ impl FullChainApi { pub fn new( client: Arc, prometheus: Option<&PrometheusRegistry>, + spawner: &impl SpawnEssentialNamed, ) -> Self { let metrics = prometheus.map(ApiMetrics::register).and_then(|r| { match r { @@ -65,13 +88,15 @@ impl FullChainApi { } }); + let (sender, receiver) = mpsc::channel(0); + + let receiver = Arc::new(Mutex::new(receiver)); + spawn_validation_pool_task("transaction-pool-task-0", receiver.clone(), spawner); + spawn_validation_pool_task("transaction-pool-task-1", receiver, spawner); + FullChainApi { client, - pool: ThreadPoolBuilder::new() - .pool_size(2) - .name_prefix("txpool-verifier") - .create() - .expect("Failed to spawn verifier threads, that are critical for node operation."), + validation_pool: Arc::new(Mutex::new(sender)), _marker: Default::default(), metrics, } @@ -105,27 +130,29 @@ where let (tx, rx) = oneshot::channel(); let client = self.client.clone(); let at = at.clone(); - + let validation_pool = self.validation_pool.clone(); let metrics = self.metrics.clone(); - metrics.report(|m| m.validations_scheduled.inc()); - - self.pool.spawn_ok(futures_diagnose::diagnose( - "validate-transaction", - async move { - let res = validate_transaction_blocking(&*client, &at, source, uxt); - if let Err(e) = tx.send(res) { - log::warn!("Unable to send a validate transaction result: {:?}", e); - } - metrics.report(|m| 
m.validations_finished.inc()); - }, - )); - Box::pin(async move { + async move { + metrics.report(|m| m.validations_scheduled.inc()); + + validation_pool.lock() + .await + .send( + async move { + let res = validate_transaction_blocking(&*client, &at, source, uxt); + let _ = tx.send(res); + metrics.report(|m| m.validations_finished.inc()); + }.boxed() + ) + .await + .map_err(|e| Error::RuntimeApi(format!("Validation pool down: {:?}", e)))?; + match rx.await { Ok(r) => r, Err(_) => Err(Error::RuntimeApi("Validation was canceled".into())), } - }) + }.boxed() } fn block_id_to_number( diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index 0cd47f870d1af..15c75a554daa3 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -42,7 +42,7 @@ use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, NumberFor, AtLeast32Bit, Extrinsic, Zero, Header as HeaderT}, }; -use sp_core::traits::SpawnNamed; +use sp_core::traits::SpawnEssentialNamed; use sp_transaction_pool::{ TransactionPool, PoolStatus, ImportNotificationStream, TxHash, TransactionFor, TransactionStatusStreamFor, MaintainedTransactionPool, PoolFuture, ChainEvent, @@ -195,20 +195,26 @@ impl BasicPool pool_api: Arc, prometheus: Option<&PrometheusRegistry>, revalidation_type: RevalidationType, - spawner: impl SpawnNamed, + spawner: impl SpawnEssentialNamed, best_block_number: NumberFor, ) -> Self { let pool = Arc::new(sc_transaction_graph::Pool::new(options, is_validator, pool_api.clone())); let (revalidation_queue, background_task) = match revalidation_type { - RevalidationType::Light => (revalidation::RevalidationQueue::new(pool_api.clone(), pool.clone()), None), + RevalidationType::Light => ( + revalidation::RevalidationQueue::new(pool_api.clone(), pool.clone()), + None, + ), RevalidationType::Full => { - let (queue, background) = revalidation::RevalidationQueue::new_background(pool_api.clone(), pool.clone()); + let (queue, background) = 
revalidation::RevalidationQueue::new_background( + pool_api.clone(), + pool.clone(), + ); (queue, Some(background)) }, }; if let Some(background_task) = background_task { - spawner.spawn("txpool-background", background_task); + spawner.spawn_essential("txpool-background", background_task); } Self { @@ -357,7 +363,7 @@ where pub fn new_light( options: sc_transaction_graph::Options, prometheus: Option<&PrometheusRegistry>, - spawner: impl SpawnNamed, + spawner: impl SpawnEssentialNamed, client: Arc, fetcher: Arc, ) -> Self { @@ -393,10 +399,10 @@ where options: sc_transaction_graph::Options, is_validator: txpool::IsValidator, prometheus: Option<&PrometheusRegistry>, - spawner: impl SpawnNamed, + spawner: impl SpawnEssentialNamed, client: Arc, ) -> Arc { - let pool_api = Arc::new(FullChainApi::new(client.clone(), prometheus)); + let pool_api = Arc::new(FullChainApi::new(client.clone(), prometheus, &spawner)); let pool = Arc::new(Self::with_revalidation_type( options, is_validator, diff --git a/client/transaction-pool/src/testing/pool.rs b/client/transaction-pool/src/testing/pool.rs index 999d1ab65eb65..675a58cd44274 100644 --- a/client/transaction-pool/src/testing/pool.rs +++ b/client/transaction-pool/src/testing/pool.rs @@ -910,7 +910,11 @@ fn should_not_accept_old_signatures() { let client = Arc::new(substrate_test_runtime_client::new()); let pool = Arc::new( - BasicPool::new_test(Arc::new(FullChainApi::new(client, None))).0 + BasicPool::new_test(Arc::new(FullChainApi::new( + client, + None, + &sp_core::testing::TaskExecutor::new(), + ))).0 ); let transfer = Transfer { @@ -946,7 +950,11 @@ fn import_notification_to_pool_maintain_works() { let mut client = Arc::new(substrate_test_runtime_client::new()); let pool = Arc::new( - BasicPool::new_test(Arc::new(FullChainApi::new(client.clone(), None))).0 + BasicPool::new_test(Arc::new(FullChainApi::new( + client.clone(), + None, + &sp_core::testing::TaskExecutor::new(), + ))).0 ); // Prepare the extrisic, push it to the 
pool and check that it was added. diff --git a/test-utils/test-runner/src/node.rs b/test-utils/test-runner/src/node.rs index ce41e5b5b5200..00be12b651bcc 100644 --- a/test-utils/test-runner/src/node.rs +++ b/test-utils/test-runner/src/node.rs @@ -134,7 +134,7 @@ impl Node { config.transaction_pool.clone(), true.into(), config.prometheus_registry(), - task_manager.spawn_handle(), + task_manager.spawn_essential_handle(), client.clone(), ); From 561dbcff78fcbeb3848cbeb35c8f0397335d7df4 Mon Sep 17 00:00:00 2001 From: Shaun Wang Date: Sat, 12 Jun 2021 12:22:40 +1200 Subject: [PATCH 32/61] Migrate pallet-staking to pallet attribute macro (#9083) * Migrate staking pallet to pallet attribute macro. * HistoryDepth default value. * Make all calls public. * Update frame/staking/src/lib.rs * remove externalities again * Update lib.rs Co-authored-by: Shawn Tabrizi --- frame/babe/src/mock.rs | 2 +- frame/grandpa/src/mock.rs | 2 +- frame/offences/benchmarking/src/lib.rs | 2 +- frame/session/benchmarking/src/lib.rs | 9 +- frame/staking/src/benchmarking.rs | 34 +- frame/staking/src/lib.rs | 1065 ++++++++++++++---------- frame/staking/src/mock.rs | 13 +- frame/staking/src/slashing.rs | 52 +- frame/staking/src/testing_utils.rs | 6 +- frame/staking/src/tests.rs | 72 +- 10 files changed, 718 insertions(+), 539 deletions(-) diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index bd99531542471..236b975817ffd 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -28,7 +28,7 @@ use sp_runtime::{ use frame_system::InitKind; use frame_support::{ parameter_types, - traits::{KeyOwnerProofSystem, OnInitialize}, + traits::{KeyOwnerProofSystem, OnInitialize, GenesisBuild}, }; use sp_io; use sp_core::{H256, U256, crypto::{IsWrappedBy, KeyTypeId, Pair}}; diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index df55f6037e303..752d94ce19085 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -24,7 +24,7 @@ use ::grandpa as 
finality_grandpa; use codec::Encode; use frame_support::{ parameter_types, - traits::{KeyOwnerProofSystem, OnFinalize, OnInitialize}, + traits::{KeyOwnerProofSystem, OnFinalize, OnInitialize, GenesisBuild}, }; use pallet_staking::EraIndex; use sp_core::{crypto::KeyTypeId, H256}; diff --git a/frame/offences/benchmarking/src/lib.rs b/frame/offences/benchmarking/src/lib.rs index e27c66c75a669..e7c61bfd989b4 100644 --- a/frame/offences/benchmarking/src/lib.rs +++ b/frame/offences/benchmarking/src/lib.rs @@ -42,7 +42,7 @@ use pallet_offences::{Config as OffencesConfig, Pallet as Offences}; use pallet_session::historical::{Config as HistoricalConfig, IdentificationTuple}; use pallet_session::{Config as SessionConfig, SessionManager}; use pallet_staking::{ - Module as Staking, Config as StakingConfig, RewardDestination, ValidatorPrefs, Exposure, + Pallet as Staking, Config as StakingConfig, RewardDestination, ValidatorPrefs, Exposure, IndividualExposure, Event as StakingEvent, }; diff --git a/frame/session/benchmarking/src/lib.rs b/frame/session/benchmarking/src/lib.rs index fff3717607f8f..d9a50b431f2e7 100644 --- a/frame/session/benchmarking/src/lib.rs +++ b/frame/session/benchmarking/src/lib.rs @@ -28,7 +28,6 @@ use sp_std::vec; use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; use frame_support::{ codec::Decode, - storage::StorageValue, traits::{KeyOwnerProofSystem, OnInitialize}, }; use frame_system::RawOrigin; @@ -59,7 +58,7 @@ benchmarks! { false, RewardDestination::Staked, )?; - let v_controller = pallet_staking::Module::::bonded(&v_stash).ok_or("not stash")?; + let v_controller = pallet_staking::Pallet::::bonded(&v_stash).ok_or("not stash")?; let keys = T::Keys::default(); let proof: Vec = vec![0,1,2,3]; // Whitelist controller account from further DB operations. @@ -75,7 +74,7 @@ benchmarks! 
{ false, RewardDestination::Staked )?; - let v_controller = pallet_staking::Module::::bonded(&v_stash).ok_or("not stash")?; + let v_controller = pallet_staking::Pallet::::bonded(&v_stash).ok_or("not stash")?; let keys = T::Keys::default(); let proof: Vec = vec![0,1,2,3]; Session::::set_keys(RawOrigin::Signed(v_controller.clone()).into(), keys, proof)?; @@ -125,7 +124,7 @@ fn check_membership_proof_setup( (sp_runtime::KeyTypeId, &'static [u8; 32]), sp_session::MembershipProof, ) { - pallet_staking::ValidatorCount::put(n); + pallet_staking::ValidatorCount::::put(n); // create validators and set random session keys for (n, who) in create_validators::(n, 1000) @@ -137,7 +136,7 @@ fn check_membership_proof_setup( use rand::SeedableRng; let validator = T::Lookup::lookup(who).unwrap(); - let controller = pallet_staking::Module::::bonded(validator).unwrap(); + let controller = pallet_staking::Pallet::::bonded(validator).unwrap(); let keys = { let mut keys = [0u8; 128]; diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index 1d8a5c1fd6451..800d3379d7e3c 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -18,7 +18,7 @@ //! Staking pallet benchmarking. use super::*; -use crate::Module as Staking; +use crate::Pallet as Staking; use testing_utils::*; use sp_runtime::traits::One; @@ -88,7 +88,7 @@ pub fn create_validator_with_nominators( } } - ValidatorCount::put(1); + ValidatorCount::::put(1); // Start a new Era let new_validators = Staking::::new_era(SessionIndex::one()).unwrap(); @@ -102,7 +102,7 @@ pub fn create_validator_with_nominators( individual: points_individual.into_iter().collect(), }; - let current_era = CurrentEra::get().unwrap(); + let current_era = CurrentEra::::get().unwrap(); ErasRewardPoints::::insert(current_era, reward); // Create reward pool @@ -164,7 +164,7 @@ benchmarks! 
{ add_slashing_spans::(&stash, s); let amount = T::Currency::minimum_balance() * 5u32.into(); // Half of total Staking::::unbond(RawOrigin::Signed(controller.clone()).into(), amount)?; - CurrentEra::put(EraIndex::max_value()); + CurrentEra::::put(EraIndex::max_value()); let ledger = Ledger::::get(&controller).ok_or("ledger not created before")?; let original_total: BalanceOf = ledger.total; whitelist_account!(controller); @@ -183,7 +183,7 @@ benchmarks! { add_slashing_spans::(&stash, s); let amount = T::Currency::minimum_balance() * 10u32.into(); Staking::::unbond(RawOrigin::Signed(controller.clone()).into(), amount)?; - CurrentEra::put(EraIndex::max_value()); + CurrentEra::::put(EraIndex::max_value()); let ledger = Ledger::::get(&controller).ok_or("ledger not created before")?; let original_total: BalanceOf = ledger.total; whitelist_account!(controller); @@ -303,17 +303,17 @@ benchmarks! { let validator_count = MAX_VALIDATORS; }: _(RawOrigin::Root, validator_count) verify { - assert_eq!(ValidatorCount::get(), validator_count); + assert_eq!(ValidatorCount::::get(), validator_count); } force_no_eras {}: _(RawOrigin::Root) - verify { assert_eq!(ForceEra::get(), Forcing::ForceNone); } + verify { assert_eq!(ForceEra::::get(), Forcing::ForceNone); } force_new_era {}: _(RawOrigin::Root) - verify { assert_eq!(ForceEra::get(), Forcing::ForceNew); } + verify { assert_eq!(ForceEra::::get(), Forcing::ForceNew); } force_new_era_always {}: _(RawOrigin::Root) - verify { assert_eq!(ForceEra::get(), Forcing::ForceAlways); } + verify { assert_eq!(ForceEra::::get(), Forcing::ForceAlways); } // Worst case scenario, the list of invulnerables is very long. set_invulnerables { @@ -361,7 +361,7 @@ benchmarks! { RewardDestination::Controller, )?; - let current_era = CurrentEra::get().unwrap(); + let current_era = CurrentEra::::get().unwrap(); // set the commission for this particular era as well. 
>::insert(current_era, validator.clone(), >::validators(&validator)); @@ -394,7 +394,7 @@ benchmarks! { RewardDestination::Staked, )?; - let current_era = CurrentEra::get().unwrap(); + let current_era = CurrentEra::::get().unwrap(); // set the commission for this particular era as well. >::insert(current_era, validator.clone(), >::validators(&validator)); @@ -444,8 +444,8 @@ benchmarks! { set_history_depth { let e in 1 .. 100; - HistoryDepth::put(e); - CurrentEra::put(e); + HistoryDepth::::put(e); + CurrentEra::::put(e); for i in 0 .. e { >::insert(i, T::AccountId::default(), Exposure::>::default()); >::insert(i, T::AccountId::default(), Exposure::>::default()); @@ -453,11 +453,11 @@ benchmarks! { >::insert(i, BalanceOf::::one()); >::insert(i, EraRewardPoints::::default()); >::insert(i, BalanceOf::::one()); - ErasStartSessionIndex::insert(i, i); + ErasStartSessionIndex::::insert(i, i); } }: _(RawOrigin::Root, EraIndex::zero(), u32::max_value()) verify { - assert_eq!(HistoryDepth::get(), 0); + assert_eq!(HistoryDepth::::get(), 0); } reap_stash { @@ -503,7 +503,7 @@ benchmarks! 
{ let new_validators = Staking::::new_era(SessionIndex::one()).unwrap(); assert!(new_validators.len() == v as usize); - let current_era = CurrentEra::get().unwrap(); + let current_era = CurrentEra::::get().unwrap(); let mut points_total = 0; let mut points_individual = Vec::new(); let mut payout_calls_arg = Vec::new(); @@ -636,7 +636,7 @@ mod tests { assert_eq!(nominators.len() as u32, n); - let current_era = CurrentEra::get().unwrap(); + let current_era = CurrentEra::::get().unwrap(); let original_free_balance = Balances::free_balance(&validator_stash); assert_ok!(Staking::payout_stakers(Origin::signed(1337), validator_stash, current_era)); diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 888601e307f35..49660350ba916 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -15,17 +15,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Staking Module +//! # Staking Pallet //! -//! The Staking module is used to manage funds at stake by network maintainers. +//! The Staking pallet is used to manage funds at stake by network maintainers. //! //! - [`Config`] //! - [`Call`] -//! - [`Module`] +//! - [`Pallet`] //! //! ## Overview //! -//! The Staking module is the means by which a set of network maintainers (known as _authorities_ in +//! The Staking pallet is the means by which a set of network maintainers (known as _authorities_ in //! some contexts and _validators_ in others) are chosen based upon those who voluntarily place //! funds under deposit. Under deposit, those funds are rewarded under normal operation but are held //! at pain of _slash_ (expropriation) should the staked maintainer be found not to be discharging @@ -59,7 +59,7 @@ //! //! #### Staking //! -//! Almost any interaction with the Staking module requires a process of _**bonding**_ (also known +//! Almost any interaction with the Staking pallet requires a process of _**bonding**_ (also known //! 
as being a _staker_). To become *bonded*, a fund-holding account known as the _stash account_, //! which holds some or all of the funds that become frozen in place as part of the staking process, //! is paired with an active **controller** account, which issues instructions on how they shall be @@ -102,7 +102,7 @@ //! //! #### Rewards and Slash //! -//! The **reward and slashing** procedure is the core of the Staking module, attempting to _embrace +//! The **reward and slashing** procedure is the core of the Staking pallet, attempting to _embrace //! valid behavior_ while _punishing any misbehavior or lack of availability_. //! //! Rewards must be claimed for each era before it gets too old by `$HISTORY_DEPTH` using the @@ -115,7 +115,7 @@ //! determined, a value is deducted from the balance of the validator and all the nominators who //! voted for this validator (values are deducted from the _stash_ account of the slashed entity). //! -//! Slashing logic is further described in the documentation of the `slashing` module. +//! Slashing logic is further described in the documentation of the `slashing` pallet. //! //! Similar to slashing, rewards are also shared among a validator and its associated nominators. //! Yet, the reward funds are not always transferred to the stash account and can be configured. See @@ -131,19 +131,19 @@ //! //! ### Session managing //! -//! The module implement the trait `SessionManager`. Which is the only API to query new validator +//! The pallet implement the trait `SessionManager`. Which is the only API to query new validator //! set and allowing these validator set to be rewarded once their era is ended. //! //! ## Interface //! //! ### Dispatchable Functions //! -//! The dispatchable functions of the Staking module enable the steps needed for entities to accept -//! and change their role, alongside some helper functions to get/set the metadata of the module. +//! 
The dispatchable functions of the Staking pallet enable the steps needed for entities to accept +//! and change their role, alongside some helper functions to get/set the metadata of the pallet. //! //! ### Public Functions //! -//! The Staking module contains many public storage items and (im)mutable functions. +//! The Staking pallet contains many public storage items and (im)mutable functions. //! //! ## Usage //! @@ -162,7 +162,7 @@ //! #[weight = 0] //! pub fn reward_myself(origin) -> dispatch::DispatchResult { //! let reported = ensure_signed(origin)?; -//! >::reward_by_ids(vec![(reported, 10)]); +//! >::reward_by_ids(vec![(reported, 10)]); //! Ok(()) //! } //! } @@ -198,9 +198,9 @@ //! //! Total reward is split among validators and their nominators depending on the number of points //! they received during the era. Points are added to a validator using -//! [`reward_by_ids`](Module::reward_by_ids). +//! [`reward_by_ids`](Pallet::reward_by_ids). //! -//! [`Module`] implements +//! [`Pallet`] implements //! [`pallet_authorship::EventHandler`] to add reward //! points to block producer and block producer of referenced uncles. //! @@ -255,14 +255,14 @@ //! //! ## GenesisConfig //! -//! The Staking module depends on the [`GenesisConfig`]. The +//! The Staking pallet depends on the [`GenesisConfig`]. The //! `GenesisConfig` is optional and allow to set some initial stakers. //! //! ## Related Modules //! //! - [Balances](../pallet_balances/index.html): Used to manage values at stake. //! - [Session](../pallet_session/index.html): Used to manage sessions. Also, a list of new -//! validators is stored in the Session module's `Validators` at the end of each era. +//! validators is stored in the Session pallet's `Validators` at the end of each era. 
#![recursion_limit = "128"] #![cfg_attr(not(feature = "std"), no_std)] @@ -288,13 +288,11 @@ use sp_std::{ }; use codec::{HasCompact, Encode, Decode}; use frame_support::{ - decl_module, decl_event, decl_storage, ensure, decl_error, + pallet_prelude::*, weights::{ Weight, WithPostDispatchInfo, constants::{WEIGHT_PER_MICROS, WEIGHT_PER_NANOS}, }, - storage::IterableStorageMap, - dispatch::{DispatchResult, DispatchResultWithPostInfo}, traits::{ Currency, LockIdentifier, LockableCurrency, WithdrawReasons, OnUnbalanced, Imbalance, Get, UnixTime, EstimateNextNewSession, EnsureOrigin, CurrencyToVote, @@ -314,11 +312,12 @@ use sp_staking::{ offence::{OnOffenceHandler, OffenceDetails, Offence, ReportOffence, OffenceError}, }; use frame_system::{ - self as system, ensure_signed, ensure_root, + ensure_signed, ensure_root, pallet_prelude::*, offchain::SendTransactionTypes, }; use frame_election_provider_support::{ElectionProvider, VoteWeight, Supports, data_provider}; pub use weights::WeightInfo; +pub use pallet::*; const STAKING_ID: LockIdentifier = *b"staking "; pub(crate) const LOG_TARGET: &'static str = "runtime::staking"; @@ -342,7 +341,7 @@ pub type EraIndex = u32; /// Counter for the number of "reward" points earned by a given validator. pub type RewardPoint = u32; -/// The balance type of this module. +/// The balance type of this pallet. pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -654,15 +653,15 @@ impl SessionInterface<::AccountId> for T w Convert<::AccountId, Option<::AccountId>>, { fn disable_validator(validator: &::AccountId) -> Result { - >::disable(validator) + >::disable(validator) } fn validators() -> Vec<::AccountId> { - >::validators() + >::validators() } fn prune_historical_up_to(up_to: SessionIndex) { - >::prune_up_to(up_to); + >::prune_up_to(up_to); } } @@ -713,82 +712,6 @@ impl< } } -pub trait Config: frame_system::Config + SendTransactionTypes> { - /// The staking balance. 
- type Currency: LockableCurrency; - - /// Time used for computing era duration. - /// - /// It is guaranteed to start being called from the first `on_finalize`. Thus value at genesis - /// is not used. - type UnixTime: UnixTime; - - /// Convert a balance into a number used for election calculation. This must fit into a `u64` - /// but is allowed to be sensibly lossy. The `u64` is used to communicate with the - /// [`sp_npos_elections`] crate which accepts u64 numbers and does operations in 128. - /// Consequently, the backward convert is used convert the u128s from sp-elections back to a - /// [`BalanceOf`]. - type CurrencyToVote: CurrencyToVote>; - - /// Something that provides the election functionality. - type ElectionProvider: frame_election_provider_support::ElectionProvider< - Self::AccountId, - Self::BlockNumber, - // we only accept an election provider that has staking as data provider. - DataProvider = Module, - >; - - /// Maximum number of nominations per nominator. - const MAX_NOMINATIONS: u32; - - /// Tokens have been minted and are unused for validator-reward. - /// See [Era payout](./index.html#era-payout). - type RewardRemainder: OnUnbalanced>; - - /// The overarching event type. - type Event: From> + Into<::Event>; - - /// Handler for the unbalanced reduction when slashing a staker. - type Slash: OnUnbalanced>; - - /// Handler for the unbalanced increment when rewarding a staker. - type Reward: OnUnbalanced>; - - /// Number of sessions per era. - type SessionsPerEra: Get; - - /// Number of eras that staked funds must remain bonded for. - type BondingDuration: Get; - - /// Number of eras that slashes are deferred by, after computation. - /// - /// This should be less than the bonding duration. Set to 0 if slashes - /// should be applied immediately, without opportunity for intervention. - type SlashDeferDuration: Get; - - /// The origin which can cancel a deferred slash. Root can always do this. 
- type SlashCancelOrigin: EnsureOrigin; - - /// Interface for interacting with a session module. - type SessionInterface: self::SessionInterface; - - /// The payout for validators and the system for the current era. - /// See [Era payout](./index.html#era-payout). - type EraPayout: EraPayout>; - - /// Something that can estimate the next session change, accurately or as a best effort guess. - type NextNewSession: EstimateNextNewSession; - - /// The maximum number of nominators rewarded for each validator. - /// - /// For each validator only the `$MaxNominatorRewardedPerValidator` biggest stakers can claim - /// their reward. This used to limit the i/o cost for the nominator payout. - type MaxNominatorRewardedPerValidator: Get; - - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; -} - /// Mode of era-forcing. #[derive(Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] @@ -828,183 +751,447 @@ impl Default for Releases { } } -decl_storage! { - trait Store for Module as Staking { - /// Number of eras to keep in history. - /// - /// Information is kept for eras in `[current_era - history_depth; current_era]`. - /// - /// Must be more than the number of eras delayed by session otherwise. I.e. active era must - /// always be in history. I.e. `active_era > current_era - history_depth` must be - /// guaranteed. - HistoryDepth get(fn history_depth) config(): u32 = 84; +pub mod migrations { + use super::*; - /// The ideal number of staking participants. - pub ValidatorCount get(fn validator_count) config(): u32; + pub mod v6 { + use super::*; + use frame_support::{traits::Get, weights::Weight, generate_storage_alias}; - /// Minimum number of staking participants before emergency conditions are imposed. - pub MinimumValidatorCount get(fn minimum_validator_count) config(): u32; + // NOTE: value type doesn't matter, we just set it to () here. 
+ generate_storage_alias!(Staking, SnapshotValidators => Value<()>); + generate_storage_alias!(Staking, SnapshotNominators => Value<()>); + generate_storage_alias!(Staking, QueuedElected => Value<()>); + generate_storage_alias!(Staking, QueuedScore => Value<()>); + generate_storage_alias!(Staking, EraElectionStatus => Value<()>); + generate_storage_alias!(Staking, IsCurrentSessionFinal => Value<()>); - /// Any validators that may never be slashed or forcibly kicked. It's a Vec since they're - /// easy to initialize and the performance hit is minimal (we expect no more than four - /// invulnerables) and restricted to testnets. - pub Invulnerables get(fn invulnerables) config(): Vec; + /// check to execute prior to migration. + pub fn pre_migrate() -> Result<(), &'static str> { + // these may or may not exist. + log!(info, "SnapshotValidators.exits()? {:?}", SnapshotValidators::exists()); + log!(info, "SnapshotNominators.exits()? {:?}", SnapshotNominators::exists()); + log!(info, "QueuedElected.exits()? {:?}", QueuedElected::exists()); + log!(info, "QueuedScore.exits()? {:?}", QueuedScore::exists()); + // these must exist. + assert!(IsCurrentSessionFinal::exists(), "IsCurrentSessionFinal storage item not found!"); + assert!(EraElectionStatus::exists(), "EraElectionStatus storage item not found!"); + Ok(()) + } - /// Map from all locked "stash" accounts to the controller account. - pub Bonded get(fn bonded): map hasher(twox_64_concat) T::AccountId => Option; + /// Migrate storage to v6. + pub fn migrate() -> Weight { + log!(info, "Migrating staking to Releases::V6_0_0"); - /// Map from all (unlocked) "controller" accounts to the info regarding the staking. - pub Ledger get(fn ledger): - map hasher(blake2_128_concat) T::AccountId - => Option>>; + SnapshotValidators::kill(); + SnapshotNominators::kill(); + QueuedElected::kill(); + QueuedScore::kill(); + EraElectionStatus::kill(); + IsCurrentSessionFinal::kill(); - /// Where the reward payment should be made. 
Keyed by stash. - pub Payee get(fn payee): map hasher(twox_64_concat) T::AccountId => RewardDestination; + StorageVersion::::put(Releases::V6_0_0); + log!(info, "Done."); + T::DbWeight::get().writes(6 + 1) + } + } +} - /// The map from (wannabe) validator stash key to the preferences of that validator. - pub Validators get(fn validators): - map hasher(twox_64_concat) T::AccountId => ValidatorPrefs; +#[frame_support::pallet] +pub mod pallet { + use super::*; - /// The map from nominator stash key to the set of stash keys of all validators to nominate. - pub Nominators get(fn nominators): - map hasher(twox_64_concat) T::AccountId => Option>; + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); - /// The current era index. - /// - /// This is the latest planned era, depending on how the Session pallet queues the validator - /// set, it might be active or not. - pub CurrentEra get(fn current_era): Option; + #[pallet::config] + pub trait Config: frame_system::Config + SendTransactionTypes> { + /// The staking balance. + type Currency: LockableCurrency; - /// The active era information, it holds index and start. + /// Time used for computing era duration. /// - /// The active era is the era being currently rewarded. Validator set of this era must be - /// equal to [`SessionInterface::validators`]. - pub ActiveEra get(fn active_era): Option; + /// It is guaranteed to start being called from the first `on_finalize`. Thus value at genesis + /// is not used. + type UnixTime: UnixTime; - /// The session index at which the era start for the last `HISTORY_DEPTH` eras. - /// - /// Note: This tracks the starting session (i.e. session index when era start being active) - /// for the eras in `[CurrentEra - HISTORY_DEPTH, CurrentEra]`. - pub ErasStartSessionIndex get(fn eras_start_session_index): - map hasher(twox_64_concat) EraIndex => Option; + /// Convert a balance into a number used for election calculation. 
This must fit into a `u64` + /// but is allowed to be sensibly lossy. The `u64` is used to communicate with the + /// [`sp_npos_elections`] crate which accepts u64 numbers and does operations in 128. + /// Consequently, the backward convert is used convert the u128s from sp-elections back to a + /// [`BalanceOf`]. + type CurrencyToVote: CurrencyToVote>; - /// Exposure of validator at era. - /// - /// This is keyed first by the era index to allow bulk deletion and then the stash account. - /// - /// Is it removed after `HISTORY_DEPTH` eras. - /// If stakers hasn't been set or has been removed then empty exposure is returned. - pub ErasStakers get(fn eras_stakers): - double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId - => Exposure>; + /// Something that provides the election functionality. + type ElectionProvider: frame_election_provider_support::ElectionProvider< + Self::AccountId, + Self::BlockNumber, + // we only accept an election provider that has staking as data provider. + DataProvider = Pallet, + >; - /// Clipped Exposure of validator at era. - /// - /// This is similar to [`ErasStakers`] but number of nominators exposed is reduced to the - /// `T::MaxNominatorRewardedPerValidator` biggest stakers. - /// (Note: the field `total` and `own` of the exposure remains unchanged). - /// This is used to limit the i/o cost for the nominator payout. - /// - /// This is keyed fist by the era index to allow bulk deletion and then the stash account. - /// - /// Is it removed after `HISTORY_DEPTH` eras. - /// If stakers hasn't been set or has been removed then empty exposure is returned. - pub ErasStakersClipped get(fn eras_stakers_clipped): - double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId - => Exposure>; + /// Maximum number of nominations per nominator. + const MAX_NOMINATIONS: u32; - /// Similar to `ErasStakers`, this holds the preferences of validators. 
- /// - /// This is keyed first by the era index to allow bulk deletion and then the stash account. - /// - /// Is it removed after `HISTORY_DEPTH` eras. - // If prefs hasn't been set or has been removed then 0 commission is returned. - pub ErasValidatorPrefs get(fn eras_validator_prefs): - double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId - => ValidatorPrefs; + /// Tokens have been minted and are unused for validator-reward. + /// See [Era payout](./index.html#era-payout). + type RewardRemainder: OnUnbalanced>; - /// The total validator era payout for the last `HISTORY_DEPTH` eras. - /// - /// Eras that haven't finished yet or has been removed doesn't have reward. - pub ErasValidatorReward get(fn eras_validator_reward): - map hasher(twox_64_concat) EraIndex => Option>; + /// The overarching event type. + type Event: From> + IsType<::Event>; - /// Rewards for the last `HISTORY_DEPTH` eras. - /// If reward hasn't been set or has been removed then 0 reward is returned. - pub ErasRewardPoints get(fn eras_reward_points): - map hasher(twox_64_concat) EraIndex => EraRewardPoints; + /// Handler for the unbalanced reduction when slashing a staker. + type Slash: OnUnbalanced>; - /// The total amount staked for the last `HISTORY_DEPTH` eras. - /// If total hasn't been set or has been removed then 0 stake is returned. - pub ErasTotalStake get(fn eras_total_stake): - map hasher(twox_64_concat) EraIndex => BalanceOf; + /// Handler for the unbalanced increment when rewarding a staker. + type Reward: OnUnbalanced>; - /// Mode of era forcing. - pub ForceEra get(fn force_era) config(): Forcing; + /// Number of sessions per era. + #[pallet::constant] + type SessionsPerEra: Get; - /// The percentage of the slash that is distributed to reporters. + /// Number of eras that staked funds must remain bonded for. + #[pallet::constant] + type BondingDuration: Get; + + /// Number of eras that slashes are deferred by, after computation. 
/// - /// The rest of the slashed value is handled by the `Slash`. - pub SlashRewardFraction get(fn slash_reward_fraction) config(): Perbill; + /// This should be less than the bonding duration. Set to 0 if slashes + /// should be applied immediately, without opportunity for intervention. + #[pallet::constant] + type SlashDeferDuration: Get; + + /// The origin which can cancel a deferred slash. Root can always do this. + type SlashCancelOrigin: EnsureOrigin; - /// The amount of currency given to reporters of a slash event which was - /// canceled by extraordinary circumstances (e.g. governance). - pub CanceledSlashPayout get(fn canceled_payout) config(): BalanceOf; + /// Interface for interacting with a session pallet. + type SessionInterface: self::SessionInterface; - /// All unapplied slashes that are queued for later. - pub UnappliedSlashes: - map hasher(twox_64_concat) EraIndex => Vec>>; + /// The payout for validators and the system for the current era. + /// See [Era payout](./index.html#era-payout). + type EraPayout: EraPayout>; - /// A mapping from still-bonded eras to the first session index of that era. + /// Something that can estimate the next session change, accurately or as a best effort guess. + type NextNewSession: EstimateNextNewSession; + + /// The maximum number of nominators rewarded for each validator. /// - /// Must contains information for eras for the range: - /// `[active_era - bounding_duration; active_era]` - BondedEras: Vec<(EraIndex, SessionIndex)>; + /// For each validator only the `$MaxNominatorRewardedPerValidator` biggest stakers can claim + /// their reward. This used to limit the i/o cost for the nominator payout. + #[pallet::constant] + type MaxNominatorRewardedPerValidator: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + } + + #[pallet::extra_constants] + impl Pallet { + //TODO: rename to snake case after https://github.com/paritytech/substrate/issues/8826 fixed. 
+ #[allow(non_snake_case)] + fn MaxNominations() -> u32 { + T::MAX_NOMINATIONS + } + } - /// All slashing events on validators, mapped by era to the highest slash proportion - /// and slash value of the era. - ValidatorSlashInEra: - double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId - => Option<(Perbill, BalanceOf)>; + #[pallet::type_value] + pub(crate) fn HistoryDepthOnEmpty() -> u32 { 84u32 } - /// All slashing events on nominators, mapped by era to the highest slash value of the era. - NominatorSlashInEra: - double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId - => Option>; + /// Number of eras to keep in history. + /// + /// Information is kept for eras in `[current_era - history_depth; current_era]`. + /// + /// Must be more than the number of eras delayed by session otherwise. I.e. active era must + /// always be in history. I.e. `active_era > current_era - history_depth` must be + /// guaranteed. + #[pallet::storage] + #[pallet::getter(fn history_depth)] + pub(crate) type HistoryDepth = StorageValue<_, u32, ValueQuery, HistoryDepthOnEmpty>; + + /// The ideal number of staking participants. + #[pallet::storage] + #[pallet::getter(fn validator_count)] + pub type ValidatorCount = StorageValue<_, u32, ValueQuery>; + + /// Minimum number of staking participants before emergency conditions are imposed. + #[pallet::storage] + #[pallet::getter(fn minimum_validator_count)] + pub type MinimumValidatorCount = StorageValue<_, u32, ValueQuery>; + + /// Any validators that may never be slashed or forcibly kicked. It's a Vec since they're + /// easy to initialize and the performance hit is minimal (we expect no more than four + /// invulnerables) and restricted to testnets. + #[pallet::storage] + #[pallet::getter(fn invulnerables)] + pub type Invulnerables = StorageValue<_, Vec, ValueQuery>; + + /// Map from all locked "stash" accounts to the controller account. 
+ #[pallet::storage] + #[pallet::getter(fn bonded)] + pub type Bonded = StorageMap<_, Twox64Concat, T::AccountId, T::AccountId>; + + /// Map from all (unlocked) "controller" accounts to the info regarding the staking. + #[pallet::storage] + #[pallet::getter(fn ledger)] + pub type Ledger = StorageMap< + _, + Blake2_128Concat, T::AccountId, + StakingLedger>, + >; - /// Slashing spans for stash accounts. - SlashingSpans get(fn slashing_spans): map hasher(twox_64_concat) T::AccountId => Option; + /// Where the reward payment should be made. Keyed by stash. + #[pallet::storage] + #[pallet::getter(fn payee)] + pub type Payee = StorageMap< + _, + Twox64Concat, T::AccountId, + RewardDestination, + ValueQuery, + >; - /// Records information about the maximum slash of a stash within a slashing span, - /// as well as how much reward has been paid out. - SpanSlash: - map hasher(twox_64_concat) (T::AccountId, slashing::SpanIndex) - => slashing::SpanRecord>; + /// The map from (wannabe) validator stash key to the preferences of that validator. + #[pallet::storage] + #[pallet::getter(fn validators)] + pub type Validators = StorageMap<_, Twox64Concat, T::AccountId, ValidatorPrefs, ValueQuery>; - /// The earliest era for which we have a pending, unapplied slash. - EarliestUnappliedSlash: Option; + /// The map from nominator stash key to the set of stash keys of all validators to nominate. + #[pallet::storage] + #[pallet::getter(fn nominators)] + pub type Nominators = StorageMap<_, Twox64Concat, T::AccountId, Nominations>; - /// The last planned session scheduled by the session pallet. - /// - /// This is basically in sync with the call to [`SessionManager::new_session`]. - pub CurrentPlannedSession get(fn current_planned_session): SessionIndex; + /// The current era index. + /// + /// This is the latest planned era, depending on how the Session pallet queues the validator + /// set, it might be active or not. 
+ #[pallet::storage] + #[pallet::getter(fn current_era)] + pub type CurrentEra = StorageValue<_, EraIndex>; - /// True if network has been upgraded to this version. - /// Storage version of the pallet. - /// - /// This is set to v6.0.0 for new networks. - StorageVersion build(|_: &GenesisConfig| Releases::V6_0_0): Releases; + /// The active era information, it holds index and start. + /// + /// The active era is the era being currently rewarded. Validator set of this era must be + /// equal to [`SessionInterface::validators`]. + #[pallet::storage] + #[pallet::getter(fn active_era)] + pub type ActiveEra = StorageValue<_, ActiveEraInfo>; + + /// The session index at which the era start for the last `HISTORY_DEPTH` eras. + /// + /// Note: This tracks the starting session (i.e. session index when era start being active) + /// for the eras in `[CurrentEra - HISTORY_DEPTH, CurrentEra]`. + #[pallet::storage] + #[pallet::getter(fn eras_start_session_index)] + pub type ErasStartSessionIndex = StorageMap<_, Twox64Concat, EraIndex, SessionIndex>; + + /// Exposure of validator at era. + /// + /// This is keyed first by the era index to allow bulk deletion and then the stash account. + /// + /// Is it removed after `HISTORY_DEPTH` eras. + /// If stakers hasn't been set or has been removed then empty exposure is returned. + #[pallet::storage] + #[pallet::getter(fn eras_stakers)] + pub type ErasStakers = StorageDoubleMap< + _, + Twox64Concat, EraIndex, + Twox64Concat, T::AccountId, + Exposure>, + ValueQuery, + >; + + /// Clipped Exposure of validator at era. + /// + /// This is similar to [`ErasStakers`] but number of nominators exposed is reduced to the + /// `T::MaxNominatorRewardedPerValidator` biggest stakers. + /// (Note: the field `total` and `own` of the exposure remains unchanged). + /// This is used to limit the i/o cost for the nominator payout. + /// + /// This is keyed fist by the era index to allow bulk deletion and then the stash account. 
+ /// + /// Is it removed after `HISTORY_DEPTH` eras. + /// If stakers hasn't been set or has been removed then empty exposure is returned. + #[pallet::storage] + #[pallet::getter(fn eras_stakers_clipped)] + pub type ErasStakersClipped = StorageDoubleMap< + _, + Twox64Concat, EraIndex, + Twox64Concat, T::AccountId, + Exposure>, + ValueQuery, + >; + + /// Similar to `ErasStakers`, this holds the preferences of validators. + /// + /// This is keyed first by the era index to allow bulk deletion and then the stash account. + /// + /// Is it removed after `HISTORY_DEPTH` eras. + // If prefs hasn't been set or has been removed then 0 commission is returned. + #[pallet::storage] + #[pallet::getter(fn eras_validator_prefs)] + pub type ErasValidatorPrefs = StorageDoubleMap< + _, + Twox64Concat, EraIndex, + Twox64Concat, T::AccountId, + ValidatorPrefs, + ValueQuery, + >; + + /// The total validator era payout for the last `HISTORY_DEPTH` eras. + /// + /// Eras that haven't finished yet or has been removed doesn't have reward. + #[pallet::storage] + #[pallet::getter(fn eras_validator_reward)] + pub type ErasValidatorReward = StorageMap<_, Twox64Concat, EraIndex, BalanceOf>; + + /// Rewards for the last `HISTORY_DEPTH` eras. + /// If reward hasn't been set or has been removed then 0 reward is returned. + #[pallet::storage] + #[pallet::getter(fn eras_reward_points)] + pub type ErasRewardPoints = StorageMap< + _, + Twox64Concat, EraIndex, + EraRewardPoints, + ValueQuery, + >; + + /// The total amount staked for the last `HISTORY_DEPTH` eras. + /// If total hasn't been set or has been removed then 0 stake is returned. + #[pallet::storage] + #[pallet::getter(fn eras_total_stake)] + pub type ErasTotalStake = StorageMap<_, Twox64Concat, EraIndex, BalanceOf, ValueQuery>; + + /// Mode of era forcing. + #[pallet::storage] + #[pallet::getter(fn force_era)] + pub type ForceEra = StorageValue<_, Forcing, ValueQuery>; + + /// The percentage of the slash that is distributed to reporters. 
+ /// + /// The rest of the slashed value is handled by the `Slash`. + #[pallet::storage] + #[pallet::getter(fn slash_reward_fraction)] + pub type SlashRewardFraction = StorageValue<_, Perbill, ValueQuery>; + + /// The amount of currency given to reporters of a slash event which was + /// canceled by extraordinary circumstances (e.g. governance). + #[pallet::storage] + #[pallet::getter(fn canceled_payout)] + pub type CanceledSlashPayout = StorageValue<_, BalanceOf, ValueQuery>; + + /// All unapplied slashes that are queued for later. + #[pallet::storage] + pub type UnappliedSlashes = StorageMap< + _, + Twox64Concat, EraIndex, + Vec>>, + ValueQuery, + >; + + /// A mapping from still-bonded eras to the first session index of that era. + /// + /// Must contains information for eras for the range: + /// `[active_era - bounding_duration; active_era]` + #[pallet::storage] + pub(crate) type BondedEras = StorageValue<_, Vec<(EraIndex, SessionIndex)>, ValueQuery>; + + /// All slashing events on validators, mapped by era to the highest slash proportion + /// and slash value of the era. + #[pallet::storage] + pub(crate) type ValidatorSlashInEra = StorageDoubleMap< + _, + Twox64Concat, EraIndex, + Twox64Concat, T::AccountId, + (Perbill, BalanceOf), + >; + + /// All slashing events on nominators, mapped by era to the highest slash value of the era. + #[pallet::storage] + pub(crate) type NominatorSlashInEra = StorageDoubleMap< + _, + Twox64Concat, EraIndex, + Twox64Concat, T::AccountId, + BalanceOf, + >; + + /// Slashing spans for stash accounts. + #[pallet::storage] + pub(crate) type SlashingSpans = StorageMap<_, Twox64Concat, T::AccountId, slashing::SlashingSpans>; + + /// Records information about the maximum slash of a stash within a slashing span, + /// as well as how much reward has been paid out. 
+ #[pallet::storage] + pub(crate) type SpanSlash = StorageMap< + _, + Twox64Concat, (T::AccountId, slashing::SpanIndex), + slashing::SpanRecord>, + ValueQuery, + >; + + /// The earliest era for which we have a pending, unapplied slash. + #[pallet::storage] + pub(crate) type EarliestUnappliedSlash = StorageValue<_, EraIndex>; + + /// The last planned session scheduled by the session pallet. + /// + /// This is basically in sync with the call to [`SessionManager::new_session`]. + #[pallet::storage] + #[pallet::getter(fn current_planned_session)] + pub type CurrentPlannedSession = StorageValue<_, SessionIndex, ValueQuery>; + + /// True if network has been upgraded to this version. + /// Storage version of the pallet. + /// + /// This is set to v6.0.0 for new networks. + #[pallet::storage] + pub(crate) type StorageVersion = StorageValue<_, Releases, ValueQuery>; + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub history_depth: u32, + pub validator_count: u32, + pub minimum_validator_count: u32, + pub invulnerables: Vec, + pub force_era: Forcing, + pub slash_reward_fraction: Perbill, + pub canceled_payout: BalanceOf, + pub stakers: Vec<(T::AccountId, T::AccountId, BalanceOf, StakerStatus)>, } - add_extra_genesis { - config(stakers): - Vec<(T::AccountId, T::AccountId, BalanceOf, StakerStatus)>; - build(|config: &GenesisConfig| { - for &(ref stash, ref controller, balance, ref status) in &config.stakers { + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + GenesisConfig { + history_depth: 84u32, + validator_count: Default::default(), + minimum_validator_count: Default::default(), + invulnerables: Default::default(), + force_era: Default::default(), + slash_reward_fraction: Default::default(), + canceled_payout: Default::default(), + stakers: Default::default(), + } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + HistoryDepth::::put(self.history_depth); + 
ValidatorCount::::put(self.validator_count); + MinimumValidatorCount::::put(self.minimum_validator_count); + Invulnerables::::put(&self.invulnerables); + ForceEra::::put(self.force_era); + CanceledSlashPayout::::put(self.canceled_payout); + SlashRewardFraction::::put(self.slash_reward_fraction); + StorageVersion::::put(Releases::V6_0_0); + + for &(ref stash, ref controller, balance, ref status) in &self.stakers { assert!( T::Currency::free_balance(&stash) >= balance, "Stash does not have enough balance to bond." ); - let _ = >::bond( + let _ = >::bond( T::Origin::from(Some(stash.clone()).into()), T::Lookup::unlookup(controller.clone()), balance, @@ -1012,80 +1199,35 @@ decl_storage! { ); let _ = match status { StakerStatus::Validator => { - >::validate( + >::validate( T::Origin::from(Some(controller.clone()).into()), Default::default(), ) }, StakerStatus::Nominator(votes) => { - >::nominate( + >::nominate( T::Origin::from(Some(controller.clone()).into()), votes.iter().map(|l| T::Lookup::unlookup(l.clone())).collect(), ) }, _ => Ok(()) }; } - }); - } -} - -pub mod migrations { - use super::*; - - pub mod v6 { - use super::*; - use frame_support::{traits::Get, weights::Weight, generate_storage_alias}; - - // NOTE: value type doesn't matter, we just set it to () here. - generate_storage_alias!(Staking, SnapshotValidators => Value<()>); - generate_storage_alias!(Staking, SnapshotNominators => Value<()>); - generate_storage_alias!(Staking, QueuedElected => Value<()>); - generate_storage_alias!(Staking, QueuedScore => Value<()>); - generate_storage_alias!(Staking, EraElectionStatus => Value<()>); - generate_storage_alias!(Staking, IsCurrentSessionFinal => Value<()>); - - /// check to execute prior to migration. - pub fn pre_migrate() -> Result<(), &'static str> { - // these may or may not exist. - log!(info, "SnapshotValidators.exits()? {:?}", SnapshotValidators::exists()); - log!(info, "SnapshotNominators.exits()? 
{:?}", SnapshotNominators::exists()); - log!(info, "QueuedElected.exits()? {:?}", QueuedElected::exists()); - log!(info, "QueuedScore.exits()? {:?}", QueuedScore::exists()); - // these must exist. - assert!(IsCurrentSessionFinal::exists(), "IsCurrentSessionFinal storage item not found!"); - assert!(EraElectionStatus::exists(), "EraElectionStatus storage item not found!"); - Ok(()) - } - - /// Migrate storage to v6. - pub fn migrate() -> Weight { - log!(info, "Migrating staking to Releases::V6_0_0"); - - SnapshotValidators::kill(); - SnapshotNominators::kill(); - QueuedElected::kill(); - QueuedScore::kill(); - EraElectionStatus::kill(); - IsCurrentSessionFinal::kill(); - - StorageVersion::put(Releases::V6_0_0); - log!(info, "Done."); - T::DbWeight::get().writes(6 + 1) } } -} -decl_event!( - pub enum Event where Balance = BalanceOf, ::AccountId { + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + #[pallet::metadata(T::AccountId = "AccountId", BalanceOf = "Balance")] + pub enum Event { /// The era payout has been set; the first balance is the validator-payout; the second is /// the remainder from the maximum amount of reward. /// \[era_index, validator_payout, remainder\] - EraPayout(EraIndex, Balance, Balance), + EraPayout(EraIndex, BalanceOf, BalanceOf), /// The staker has been rewarded by this amount. \[stash, amount\] - Reward(AccountId, Balance), + Reward(T::AccountId, BalanceOf), /// One validator (and its nominators) has been slashed by the given amount. /// \[validator, amount\] - Slash(AccountId, Balance), + Slash(T::AccountId, BalanceOf), /// An old slashing report from a prior era was discarded because it could /// not be processed. \[session_index\] OldSlashingReportDiscarded(SessionIndex), @@ -1095,20 +1237,18 @@ decl_event!( /// /// NOTE: This event is only emitted when funds are bonded via a dispatchable. Notably, /// it will not be emitted for staking rewards when they are added to stake. 
- Bonded(AccountId, Balance), + Bonded(T::AccountId, BalanceOf), /// An account has unbonded this amount. \[stash, amount\] - Unbonded(AccountId, Balance), + Unbonded(T::AccountId, BalanceOf), /// An account has called `withdraw_unbonded` and removed unbonding chunks worth `Balance` /// from the unlocking queue. \[stash, amount\] - Withdrawn(AccountId, Balance), + Withdrawn(T::AccountId, BalanceOf), /// A nominator has been kicked from a validator. \[nominator, stash\] - Kicked(AccountId, AccountId), + Kicked(T::AccountId, T::AccountId), } -); -decl_error! { - /// Error for the staking module. - pub enum Error for Module { + #[pallet::error] + pub enum Error { /// Not a controller account. NotController, /// Not a stash account. @@ -1150,73 +1290,51 @@ decl_error! { /// A nomination target was supplied that was blocked or otherwise not a validator. BadTarget, } -} - -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - /// Number of sessions per era. - const SessionsPerEra: SessionIndex = T::SessionsPerEra::get(); - - /// Number of eras that staked funds must remain bonded for. - const BondingDuration: EraIndex = T::BondingDuration::get(); - - /// Number of eras that slashes are deferred by, after computation. - /// - /// This should be less than the bonding duration. - /// Set to 0 if slashes should be applied immediately, without opportunity for - /// intervention. - const SlashDeferDuration: EraIndex = T::SlashDeferDuration::get(); - - /// The maximum number of nominators rewarded for each validator. - /// - /// For each validator only the `$MaxNominatorRewardedPerValidator` biggest stakers can claim - /// their reward. This used to limit the i/o cost for the nominator payout. - const MaxNominatorRewardedPerValidator: u32 = T::MaxNominatorRewardedPerValidator::get(); - - /// Maximum number of nominations per nominator. 
- const MaxNominations: u32 = T::MAX_NOMINATIONS; - - type Error = Error; - - fn deposit_event() = default; + #[pallet::hooks] + impl Hooks> for Pallet { fn on_runtime_upgrade() -> Weight { - if StorageVersion::get() == Releases::V5_0_0 { + if StorageVersion::::get() == Releases::V5_0_0 { migrations::v6::migrate::() } else { T::DbWeight::get().reads(1) } } - fn on_initialize(_now: T::BlockNumber) -> Weight { + fn on_initialize(_now: BlockNumberFor) -> Weight { // just return the weight of the on_finalize. T::DbWeight::get().reads(1) } - fn on_finalize() { + fn on_finalize(_n: BlockNumberFor) { // Set the start of the first era. if let Some(mut active_era) = Self::active_era() { if active_era.start.is_none() { let now_as_millis_u64 = T::UnixTime::now().as_millis().saturated_into::(); active_era.start = Some(now_as_millis_u64); // This write only ever happens once, we don't include it in the weight in general - ActiveEra::put(active_era); + ActiveEra::::put(active_era); } } // `on_finalize` weight is tracked in `on_initialize` } fn integrity_test() { - sp_io::TestExternalities::new_empty().execute_with(|| - assert!( - T::SlashDeferDuration::get() < T::BondingDuration::get() || T::BondingDuration::get() == 0, - "As per documentation, slash defer duration ({}) should be less than bonding duration ({}).", - T::SlashDeferDuration::get(), - T::BondingDuration::get(), - ) - ); + sp_std::if_std! { + sp_io::TestExternalities::new_empty().execute_with(|| + assert!( + T::SlashDeferDuration::get() < T::BondingDuration::get() || T::BondingDuration::get() == 0, + "As per documentation, slash defer duration ({}) should be less than bonding duration ({}).", + T::SlashDeferDuration::get(), + T::BondingDuration::get(), + ) + ); + } } + } + #[pallet::call] + impl Pallet { /// Take the origin account as a stash and lock up `value` of its balance. `controller` will /// be the account that controls it. /// @@ -1239,12 +1357,13 @@ decl_module! 
{ /// - Read: Bonded, Ledger, [Origin Account], Current Era, History Depth, Locks /// - Write: Bonded, Payee, [Origin Account], Locks, Ledger /// # - #[weight = T::WeightInfo::bond()] - pub fn bond(origin, + #[pallet::weight(T::WeightInfo::bond())] + pub fn bond( + origin: OriginFor, controller: ::Source, - #[compact] value: BalanceOf, + #[pallet::compact] value: BalanceOf, payee: RewardDestination, - ) { + ) -> DispatchResult { let stash = ensure_signed(origin)?; if >::contains_key(&stash) { @@ -1262,20 +1381,20 @@ decl_module! { Err(Error::::InsufficientValue)? } - system::Pallet::::inc_consumers(&stash).map_err(|_| Error::::BadState)?; + frame_system::Pallet::::inc_consumers(&stash).map_err(|_| Error::::BadState)?; // You're auto-bonded forever, here. We might improve this by only bonding when // you actually validate/nominate and remove once you unbond __everything__. >::insert(&stash, &controller); >::insert(&stash, payee); - let current_era = CurrentEra::get().unwrap_or(0); + let current_era = CurrentEra::::get().unwrap_or(0); let history_depth = Self::history_depth(); let last_reward_era = current_era.saturating_sub(history_depth); let stash_balance = T::Currency::free_balance(&stash); let value = value.min(stash_balance); - Self::deposit_event(RawEvent::Bonded(stash.clone(), value)); + Self::deposit_event(Event::::Bonded(stash.clone(), value)); let item = StakingLedger { stash, total: value, @@ -1284,6 +1403,7 @@ decl_module! { claimed_rewards: (last_reward_era..current_era).collect(), }; Self::update_ledger(&controller, &item); + Ok(()) } /// Add some extra amount that have appeared in the stash `free_balance` into the balance up @@ -1307,8 +1427,11 @@ decl_module! 
{ /// - Read: Era Election Status, Bonded, Ledger, [Origin Account], Locks /// - Write: [Origin Account], Locks, Ledger /// # - #[weight = T::WeightInfo::bond_extra()] - fn bond_extra(origin, #[compact] max_additional: BalanceOf) { + #[pallet::weight(T::WeightInfo::bond_extra())] + pub fn bond_extra( + origin: OriginFor, + #[pallet::compact] max_additional: BalanceOf, + ) -> DispatchResult { let stash = ensure_signed(origin)?; let controller = Self::bonded(&stash).ok_or(Error::::NotStash)?; @@ -1322,9 +1445,10 @@ decl_module! { // last check: the new active amount of ledger must be more than ED. ensure!(ledger.active >= T::Currency::minimum_balance(), Error::::InsufficientValue); - Self::deposit_event(RawEvent::Bonded(stash, extra)); + Self::deposit_event(Event::::Bonded(stash, extra)); Self::update_ledger(&controller, &ledger); } + Ok(()) } /// Schedule a portion of the stash to be unlocked ready for transfer out after the bond @@ -1359,8 +1483,8 @@ decl_module! { /// - Read: EraElectionStatus, Ledger, CurrentEra, Locks, BalanceOf Stash, /// - Write: Locks, Ledger, BalanceOf Stash, /// - #[weight = T::WeightInfo::unbond()] - fn unbond(origin, #[compact] value: BalanceOf) { + #[pallet::weight(T::WeightInfo::unbond())] + pub fn unbond(origin: OriginFor, #[pallet::compact] value: BalanceOf) -> DispatchResult { let controller = ensure_signed(origin)?; let mut ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; ensure!( @@ -1383,8 +1507,9 @@ decl_module! { let era = Self::current_era().unwrap_or(0) + T::BondingDuration::get(); ledger.unlocking.push(UnlockChunk { value, era }); Self::update_ledger(&controller, &ledger); - Self::deposit_event(RawEvent::Unbonded(ledger.stash, value)); + Self::deposit_event(Event::::Unbonded(ledger.stash, value)); } + Ok(()) } /// Remove any unlocked chunks from the `unlocking` queue from our management. @@ -1418,8 +1543,11 @@ decl_module! 
{ /// - Writes Each: SpanSlash * S /// NOTE: Weight annotation is the kill scenario, we refund otherwise. /// # - #[weight = T::WeightInfo::withdraw_unbonded_kill(*num_slashing_spans)] - fn withdraw_unbonded(origin, num_slashing_spans: u32) -> DispatchResultWithPostInfo { + #[pallet::weight(T::WeightInfo::withdraw_unbonded_kill(*num_slashing_spans))] + pub fn withdraw_unbonded( + origin: OriginFor, + num_slashing_spans: u32, + ) -> DispatchResultWithPostInfo { let controller = ensure_signed(origin)?; let mut ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; let (stash, old_total) = (ledger.stash.clone(), ledger.total); @@ -1449,7 +1577,7 @@ decl_module! { if ledger.total < old_total { // Already checked that this won't overflow by entry condition. let value = old_total - ledger.total; - Self::deposit_event(RawEvent::Withdrawn(stash, value)); + Self::deposit_event(Event::::Withdrawn(stash, value)); } Ok(post_info_weight.into()) @@ -1472,13 +1600,14 @@ decl_module! { /// - Read: Era Election Status, Ledger /// - Write: Nominators, Validators /// # - #[weight = T::WeightInfo::validate()] - pub fn validate(origin, prefs: ValidatorPrefs) { + #[pallet::weight(T::WeightInfo::validate())] + pub fn validate(origin: OriginFor, prefs: ValidatorPrefs) -> DispatchResult { let controller = ensure_signed(origin)?; let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; let stash = &ledger.stash; >::remove(stash); >::insert(stash, prefs); + Ok(()) } /// Declare the desire to nominate `targets` for the origin controller. @@ -1500,8 +1629,11 @@ decl_module! 
{ /// - Reads: Era Election Status, Ledger, Current Era /// - Writes: Validators, Nominators /// # - #[weight = T::WeightInfo::nominate(targets.len() as u32)] - pub fn nominate(origin, targets: Vec<::Source>) { + #[pallet::weight(T::WeightInfo::nominate(targets.len() as u32))] + pub fn nominate( + origin: OriginFor, + targets: Vec<::Source>, + ) -> DispatchResult { let controller = ensure_signed(origin)?; let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; let stash = &ledger.stash; @@ -1528,6 +1660,7 @@ decl_module! { >::remove(stash); >::insert(stash, &nominations); + Ok(()) } /// Declare no desire to either validate or nominate. @@ -1547,11 +1680,12 @@ decl_module! { /// - Read: EraElectionStatus, Ledger /// - Write: Validators, Nominators /// # - #[weight = T::WeightInfo::chill()] - fn chill(origin) { + #[pallet::weight(T::WeightInfo::chill())] + pub fn chill(origin: OriginFor) -> DispatchResult { let controller = ensure_signed(origin)?; let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; Self::chill_stash(&ledger.stash); + Ok(()) } /// (Re-)set the payment target for a controller. @@ -1570,12 +1704,16 @@ decl_module! { /// - Read: Ledger /// - Write: Payee /// # - #[weight = T::WeightInfo::set_payee()] - fn set_payee(origin, payee: RewardDestination) { + #[pallet::weight(T::WeightInfo::set_payee())] + pub fn set_payee( + origin: OriginFor, + payee: RewardDestination, + ) -> DispatchResult { let controller = ensure_signed(origin)?; let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; let stash = &ledger.stash; >::insert(stash, payee); + Ok(()) } /// (Re-)set the controller of a stash. @@ -1594,8 +1732,11 @@ decl_module! 
{ /// - Read: Bonded, Ledger New Controller, Ledger Old Controller /// - Write: Bonded, Ledger New Controller, Ledger Old Controller /// # - #[weight = T::WeightInfo::set_controller()] - fn set_controller(origin, controller: ::Source) { + #[pallet::weight(T::WeightInfo::set_controller())] + pub fn set_controller( + origin: OriginFor, + controller: ::Source, + ) -> DispatchResult { let stash = ensure_signed(origin)?; let old_controller = Self::bonded(&stash).ok_or(Error::::NotStash)?; let controller = T::Lookup::lookup(controller)?; @@ -1608,6 +1749,7 @@ decl_module! { >::insert(&controller, l); } } + Ok(()) } /// Sets the ideal number of validators. @@ -1618,10 +1760,14 @@ decl_module! { /// Weight: O(1) /// Write: Validator Count /// # - #[weight = T::WeightInfo::set_validator_count()] - fn set_validator_count(origin, #[compact] new: u32) { + #[pallet::weight(T::WeightInfo::set_validator_count())] + pub fn set_validator_count( + origin: OriginFor, + #[pallet::compact] new: u32, + ) -> DispatchResult { ensure_root(origin)?; - ValidatorCount::put(new); + ValidatorCount::::put(new); + Ok(()) } /// Increments the ideal number of validators. @@ -1631,10 +1777,14 @@ decl_module! { /// # /// Same as [`set_validator_count`]. /// # - #[weight = T::WeightInfo::set_validator_count()] - fn increase_validator_count(origin, #[compact] additional: u32) { + #[pallet::weight(T::WeightInfo::set_validator_count())] + pub fn increase_validator_count( + origin: OriginFor, + #[pallet::compact] additional: u32, + ) -> DispatchResult { ensure_root(origin)?; - ValidatorCount::mutate(|n| *n += additional); + ValidatorCount::::mutate(|n| *n += additional); + Ok(()) } /// Scale up the ideal number of validators by a factor. @@ -1644,10 +1794,11 @@ decl_module! { /// # /// Same as [`set_validator_count`]. 
/// # - #[weight = T::WeightInfo::set_validator_count()] - fn scale_validator_count(origin, factor: Percent) { + #[pallet::weight(T::WeightInfo::set_validator_count())] + pub fn scale_validator_count(origin: OriginFor, factor: Percent) -> DispatchResult { ensure_root(origin)?; - ValidatorCount::mutate(|n| *n += factor * *n); + ValidatorCount::::mutate(|n| *n += factor * *n); + Ok(()) } /// Force there to be no new eras indefinitely. @@ -1659,10 +1810,11 @@ decl_module! { /// - Weight: O(1) /// - Write: ForceEra /// # - #[weight = T::WeightInfo::force_no_eras()] - fn force_no_eras(origin) { + #[pallet::weight(T::WeightInfo::force_no_eras())] + pub fn force_no_eras(origin: OriginFor) -> DispatchResult { ensure_root(origin)?; - ForceEra::put(Forcing::ForceNone); + ForceEra::::put(Forcing::ForceNone); + Ok(()) } /// Force there to be a new era at the end of the next session. After this, it will be @@ -1675,10 +1827,11 @@ decl_module! { /// - Weight: O(1) /// - Write ForceEra /// # - #[weight = T::WeightInfo::force_new_era()] - fn force_new_era(origin) { + #[pallet::weight(T::WeightInfo::force_new_era())] + pub fn force_new_era(origin: OriginFor) -> DispatchResult { ensure_root(origin)?; - ForceEra::put(Forcing::ForceNew); + ForceEra::::put(Forcing::ForceNew); + Ok(()) } /// Set the validators who cannot be slashed (if any). @@ -1689,10 +1842,14 @@ decl_module! { /// - O(V) /// - Write: Invulnerables /// # - #[weight = T::WeightInfo::set_invulnerables(invulnerables.len() as u32)] - fn set_invulnerables(origin, invulnerables: Vec) { + #[pallet::weight(T::WeightInfo::set_invulnerables(invulnerables.len() as u32))] + pub fn set_invulnerables( + origin: OriginFor, + invulnerables: Vec, + ) -> DispatchResult { ensure_root(origin)?; >::put(invulnerables); + Ok(()) } /// Force a current staker to become completely unstaked, immediately. @@ -1705,8 +1862,12 @@ decl_module! 
{ /// Writes: Bonded, Slashing Spans (if S > 0), Ledger, Payee, Validators, Nominators, Account, Locks /// Writes Each: SpanSlash * S /// # - #[weight = T::WeightInfo::force_unstake(*num_slashing_spans)] - fn force_unstake(origin, stash: T::AccountId, num_slashing_spans: u32) { + #[pallet::weight(T::WeightInfo::force_unstake(*num_slashing_spans))] + pub fn force_unstake( + origin: OriginFor, + stash: T::AccountId, + num_slashing_spans: u32, + ) -> DispatchResult { ensure_root(origin)?; // remove all staking-related information. @@ -1714,6 +1875,7 @@ decl_module! { // remove the lock. T::Currency::remove_lock(STAKING_ID, &stash); + Ok(()) } /// Force there to be a new era at the end of sessions indefinitely. @@ -1724,10 +1886,11 @@ decl_module! { /// - Weight: O(1) /// - Write: ForceEra /// # - #[weight = T::WeightInfo::force_new_era_always()] - fn force_new_era_always(origin) { + #[pallet::weight(T::WeightInfo::force_new_era_always())] + pub fn force_new_era_always(origin: OriginFor) -> DispatchResult { ensure_root(origin)?; - ForceEra::put(Forcing::ForceAlways); + ForceEra::::put(Forcing::ForceAlways); + Ok(()) } /// Cancel enactment of a deferred slash. @@ -1743,8 +1906,12 @@ decl_module! { /// - Read: Unapplied Slashes /// - Write: Unapplied Slashes /// # - #[weight = T::WeightInfo::cancel_deferred_slash(slash_indices.len() as u32)] - fn cancel_deferred_slash(origin, era: EraIndex, slash_indices: Vec) { + #[pallet::weight(T::WeightInfo::cancel_deferred_slash(slash_indices.len() as u32))] + pub fn cancel_deferred_slash( + origin: OriginFor, + era: EraIndex, + slash_indices: Vec, + ) -> DispatchResult { T::SlashCancelOrigin::ensure_origin(origin)?; ensure!(!slash_indices.is_empty(), Error::::EmptyTargets); @@ -1760,6 +1927,7 @@ decl_module! { } ::UnappliedSlashes::insert(&era, &unapplied); + Ok(()) } /// Pay out all the stakers behind a single validator for a single era. @@ -1790,8 +1958,12 @@ decl_module! 
{ /// NOTE: weights are assuming that payouts are made to alive stash account (Staked). /// Paying even a dead controller is cheaper weight-wise. We don't do any refunds here. /// # - #[weight = T::WeightInfo::payout_stakers_alive_staked(T::MaxNominatorRewardedPerValidator::get())] - fn payout_stakers(origin, validator_stash: T::AccountId, era: EraIndex) -> DispatchResultWithPostInfo { + #[pallet::weight(T::WeightInfo::payout_stakers_alive_staked(T::MaxNominatorRewardedPerValidator::get()))] + pub(super) fn payout_stakers( + origin: OriginFor, + validator_stash: T::AccountId, + era: EraIndex, + ) -> DispatchResultWithPostInfo { ensure_signed(origin)?; Self::do_payout_stakers(validator_stash, era) } @@ -1810,8 +1982,11 @@ decl_module! { /// - Reads: EraElectionStatus, Ledger, Locks, [Origin Account] /// - Writes: [Origin Account], Locks, Ledger /// # - #[weight = T::WeightInfo::rebond(MAX_UNLOCKING_CHUNKS as u32)] - fn rebond(origin, #[compact] value: BalanceOf) -> DispatchResultWithPostInfo { + #[pallet::weight(T::WeightInfo::rebond(MAX_UNLOCKING_CHUNKS as u32))] + pub fn rebond( + origin: OriginFor, + #[pallet::compact] value: BalanceOf, + ) -> DispatchResultWithPostInfo { let controller = ensure_signed(origin)?; let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; ensure!(!ledger.unlocking.is_empty(), Error::::NoUnlockChunk); @@ -1820,7 +1995,7 @@ decl_module! { // last check: the new active amount of ledger must be more than ED. ensure!(ledger.active >= T::Currency::minimum_balance(), Error::::InsufficientValue); - Self::deposit_event(RawEvent::Bonded(ledger.stash.clone(), value)); + Self::deposit_event(Event::::Bonded(ledger.stash.clone(), value)); Self::update_ledger(&controller, &ledger); Ok(Some( 35 * WEIGHT_PER_MICROS @@ -1850,14 +2025,14 @@ decl_module! 
{ /// - Clear Prefix Each: Era Stakers, EraStakersClipped, ErasValidatorPrefs /// - Writes Each: ErasValidatorReward, ErasRewardPoints, ErasTotalStake, ErasStartSessionIndex /// # - #[weight = T::WeightInfo::set_history_depth(*_era_items_deleted)] - fn set_history_depth(origin, - #[compact] new_history_depth: EraIndex, - #[compact] _era_items_deleted: u32, - ) { + #[pallet::weight(T::WeightInfo::set_history_depth(*_era_items_deleted))] + pub fn set_history_depth(origin: OriginFor, + #[pallet::compact] new_history_depth: EraIndex, + #[pallet::compact] _era_items_deleted: u32, + ) -> DispatchResult { ensure_root(origin)?; if let Some(current_era) = Self::current_era() { - HistoryDepth::mutate(|history_depth| { + HistoryDepth::::mutate(|history_depth| { let last_kept = current_era.checked_sub(*history_depth).unwrap_or(0); let new_last_kept = current_era.checked_sub(new_history_depth).unwrap_or(0); for era_index in last_kept..new_last_kept { @@ -1866,6 +2041,7 @@ decl_module! { *history_depth = new_history_depth }) } + Ok(()) } /// Remove all data structure concerning a staker/stash once its balance is at the minimum. @@ -1883,12 +2059,17 @@ decl_module! { /// - Writes: Bonded, Slashing Spans (if S > 0), Ledger, Payee, Validators, Nominators, Stash Account, Locks /// - Writes Each: SpanSlash * S /// # - #[weight = T::WeightInfo::reap_stash(*num_slashing_spans)] - fn reap_stash(_origin, stash: T::AccountId, num_slashing_spans: u32) { + #[pallet::weight(T::WeightInfo::reap_stash(*num_slashing_spans))] + pub fn reap_stash( + _origin: OriginFor, + stash: T::AccountId, + num_slashing_spans: u32, + ) -> DispatchResult { let at_minimum = T::Currency::total_balance(&stash) == T::Currency::minimum_balance(); ensure!(at_minimum, Error::::FundedTarget); Self::kill_stash(&stash, num_slashing_spans)?; T::Currency::remove_lock(STAKING_ID, &stash); + Ok(()) } /// Remove the given nominations from the calling validator. @@ -1904,8 +2085,8 @@ decl_module! 
{ /// /// Note: Making this call only makes sense if you first set the validator preferences to /// block any further nominations. - #[weight = T::WeightInfo::kick(who.len() as u32)] - pub fn kick(origin, who: Vec<::Source>) -> DispatchResult { + #[pallet::weight(T::WeightInfo::kick(who.len() as u32))] + pub fn kick(origin: OriginFor, who: Vec<::Source>) -> DispatchResult { let controller = ensure_signed(origin)?; let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; let stash = &ledger.stash; @@ -1918,7 +2099,7 @@ decl_module! { Nominators::::mutate(&nom_stash, |maybe_nom| if let Some(ref mut nom) = maybe_nom { if let Some(pos) = nom.targets.iter().position(|v| v == stash) { nom.targets.swap_remove(pos); - Self::deposit_event(RawEvent::Kicked(nom_stash.clone(), stash.clone())); + Self::deposit_event(Event::::Kicked(nom_stash.clone(), stash.clone())); } }); } @@ -1928,7 +2109,7 @@ decl_module! { } } -impl Module { +impl Pallet { /// The total balance that can be slashed from a stash account as of right now. pub fn slashable_balance_of(stash: &T::AccountId) -> BalanceOf { // Weight note: consider making the stake accessible through stash. @@ -1948,7 +2129,7 @@ impl Module { /// This prevents call sites from repeatedly requesting `total_issuance` from backend. But it is /// important to be only used while the total issuance is not changing. pub fn slashable_balance_of_fn() -> Box VoteWeight> { - // NOTE: changing this to unboxed `impl Fn(..)` return type and the module will still + // NOTE: changing this to unboxed `impl Fn(..)` return type and the pallet will still // compile, while some types in mock fail to resolve. 
let issuance = T::Currency::total_issuance(); Box::new(move |who: &T::AccountId| -> VoteWeight { @@ -1958,7 +2139,7 @@ impl Module { fn do_payout_stakers(validator_stash: T::AccountId, era: EraIndex) -> DispatchResultWithPostInfo { // Validate input data - let current_era = CurrentEra::get().ok_or( + let current_era = CurrentEra::::get().ok_or( Error::::InvalidEraToReward.with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) )?; let history_depth = Self::history_depth(); @@ -2040,7 +2221,7 @@ impl Module { &ledger.stash, validator_staking_payout + validator_commission_payout ) { - Self::deposit_event(RawEvent::Reward(ledger.stash, imbalance.peek())); + Self::deposit_event(Event::::Reward(ledger.stash, imbalance.peek())); } // Track the number of payout ops to nominators. Note: `WeightInfo::payout_stakers_alive_staked` @@ -2060,7 +2241,7 @@ impl Module { if let Some(imbalance) = Self::make_payout(&nominator.who, nominator_reward) { // Note: this logic does not count payouts for `RewardDestination::None`. nominator_payout_count += 1; - Self::deposit_event(RawEvent::Reward(nominator.who.clone(), imbalance.peek())); + Self::deposit_event(Event::::Reward(nominator.who.clone(), imbalance.peek())); } } @@ -2131,9 +2312,9 @@ impl Module { let era_length = session_index.checked_sub(current_era_start_session_index) .unwrap_or(0); // Must never happen. - match ForceEra::get() { + match ForceEra::::get() { // Will set to default again, which is `NotForcing`. - Forcing::ForceNew => ForceEra::kill(), + Forcing::ForceNew => ForceEra::::kill(), // Short circuit to `new_era`. Forcing::ForceAlways => (), // Only go to `new_era` if deadline reached. @@ -2191,7 +2372,7 @@ impl Module { /// * reset `active_era.start`, /// * update `BondedEras` and apply slashes. 
fn start_era(start_session: SessionIndex) { - let active_era = ActiveEra::mutate(|active_era| { + let active_era = ActiveEra::::mutate(|active_era| { let new_index = active_era.as_ref().map(|info| info.index + 1).unwrap_or(0); *active_era = Some(ActiveEraInfo { index: new_index, @@ -2203,7 +2384,7 @@ impl Module { let bonding_duration = T::BondingDuration::get(); - BondedEras::mutate(|bonded| { + BondedEras::::mutate(|bonded| { bonded.push((active_era, start_session)); if active_era > bonding_duration { @@ -2239,7 +2420,7 @@ impl Module { let issuance = T::Currency::total_issuance(); let (validator_payout, rest) = T::EraPayout::era_payout(staked, issuance, era_duration); - Self::deposit_event(RawEvent::EraPayout(active_era.index, validator_payout, rest)); + Self::deposit_event(Event::::EraPayout(active_era.index, validator_payout, rest)); // Set ending era reward. >::insert(&active_era.index, validator_payout); @@ -2250,11 +2431,11 @@ impl Module { /// Plan a new era. Return the potential new staking set. fn new_era(start_session_index: SessionIndex) -> Option> { // Increment or set current era. - let current_era = CurrentEra::mutate(|s| { + let current_era = CurrentEra::::mutate(|s| { *s = Some(s.map(|s| s + 1).unwrap_or(0)); s.unwrap() }); - ErasStartSessionIndex::insert(¤t_era, &start_session_index); + ErasStartSessionIndex::::insert(¤t_era, &start_session_index); // Clean old era information. 
if let Some(old_era) = current_era.checked_sub(Self::history_depth() + 1) { @@ -2338,7 +2519,7 @@ impl Module { } // emit event - Self::deposit_event(RawEvent::StakingElection); + Self::deposit_event(Event::::StakingElection); if current_era > 0 { log!( @@ -2407,7 +2588,7 @@ impl Module { >::remove(stash); >::remove(stash); - system::Pallet::::dec_consumers(stash); + frame_system::Pallet::::dec_consumers(stash); Ok(()) } @@ -2420,7 +2601,7 @@ impl Module { >::remove(era_index); >::remove(era_index); >::remove(era_index); - ErasStartSessionIndex::remove(era_index); + ErasStartSessionIndex::::remove(era_index); } /// Apply previously-unapplied slashes on the beginning of a new era, after a delay. @@ -2465,9 +2646,9 @@ impl Module { /// Ensures that at the end of the current session there will be a new era. fn ensure_new_era() { - match ForceEra::get() { + match ForceEra::::get() { Forcing::ForceAlways | Forcing::ForceNew => (), - _ => ForceEra::put(Forcing::ForceNew), + _ => ForceEra::::put(Forcing::ForceNew), } } @@ -2482,7 +2663,7 @@ impl Module { #[cfg(feature = "runtime-benchmarks")] pub fn set_slash_reward_fraction(fraction: Perbill) { - SlashRewardFraction::put(fraction); + SlashRewardFraction::::put(fraction); } /// Get all of the voters that are eligible for the npos election. 
@@ -2534,7 +2715,7 @@ impl Module { } impl frame_election_provider_support::ElectionDataProvider - for Module + for Pallet { const MAXIMUM_VOTES_PER_VOTER: u32 = T::MAX_NOMINATIONS; fn desired_targets() -> data_provider::Result<(u32, Weight)> { @@ -2658,10 +2839,10 @@ impl frame_election_provider_support::ElectionDataProvider pallet_session::SessionManager for Module { +impl pallet_session::SessionManager for Pallet { fn new_session(new_index: SessionIndex) -> Option> { log!(trace, "planning new_session({})", new_index); - CurrentPlannedSession::put(new_index); + CurrentPlannedSession::::put(new_index); Self::new_session(new_index) } fn start_session(start_index: SessionIndex) { @@ -2675,7 +2856,7 @@ impl pallet_session::SessionManager for Module { } impl historical::SessionManager>> - for Module + for Pallet { fn new_session( new_index: SessionIndex, @@ -2703,7 +2884,7 @@ impl historical::SessionManager pallet_authorship::EventHandler for Module +impl pallet_authorship::EventHandler for Pallet where T: Config + pallet_authorship::Config + pallet_session::Config, { @@ -2724,7 +2905,7 @@ pub struct StashOf(sp_std::marker::PhantomData); impl Convert> for StashOf { fn convert(controller: T::AccountId) -> Option { - >::ledger(&controller).map(|l| l.stash) + >::ledger(&controller).map(|l| l.stash) } } @@ -2739,15 +2920,15 @@ impl Convert for ExposureOf { fn convert(validator: T::AccountId) -> Option>> { - >::active_era() - .map(|active_era| >::eras_stakers(active_era.index, &validator)) + >::active_era() + .map(|active_era| >::eras_stakers(active_era.index, &validator)) } } /// This is intended to be used with `FilterHistoricalOffences`. 
impl OnOffenceHandler, Weight> - for Module + for Pallet where T: pallet_session::Config::AccountId>, T: pallet_session::historical::Config< @@ -2769,7 +2950,7 @@ where slash_fraction: &[Perbill], slash_session: SessionIndex, ) -> Weight { - let reward_proportion = SlashRewardFraction::get(); + let reward_proportion = SlashRewardFraction::::get(); let mut consumed_weight: Weight = 0; let mut add_db_reads_writes = |reads, writes| { consumed_weight += T::DbWeight::get().reads_writes(reads, writes); @@ -2798,7 +2979,7 @@ where let slash_era = if slash_session >= active_era_start_session_index { active_era } else { - let eras = BondedEras::get(); + let eras = BondedEras::::get(); add_db_reads_writes(1, 0); // reverse because it's more likely to find reports from recent eras. @@ -2883,7 +3064,7 @@ pub struct FilterHistoricalOffences { } impl ReportOffence - for FilterHistoricalOffences, R> + for FilterHistoricalOffences, R> where T: Config, R: ReportOffence, @@ -2892,13 +3073,13 @@ where fn report_offence(reporters: Vec, offence: O) -> Result<(), OffenceError> { // disallow any slashing from before the current bonding period. 
let offence_session = offence.session_index(); - let bonded_eras = BondedEras::get(); + let bonded_eras = BondedEras::::get(); if bonded_eras.first().filter(|(_, start)| offence_session >= *start).is_some() { R::report_offence(reporters, offence) } else { - >::deposit_event( - RawEvent::OldSlashingReportDiscarded(offence_session) + >::deposit_event( + Event::::OldSlashingReportDiscarded(offence_session) ); Ok(()) } diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index b4ff35d0d6f90..211cc025300e0 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -21,9 +21,8 @@ use crate::*; use crate as staking; use frame_support::{ assert_ok, parameter_types, - traits::{Currency, FindAuthor, Get, OnFinalize, OnInitialize, OneSessionHandler}, + traits::{Currency, FindAuthor, Get, OnInitialize, OneSessionHandler}, weights::constants::RocksDbWeight, - IterableStorageMap, StorageDoubleMap, StorageMap, StorageValue, }; use sp_core::H256; use sp_io; @@ -194,7 +193,7 @@ impl pallet_authorship::Config for Test { type FindAuthor = Author11; type UncleGenerations = UncleGenerations; type FilterUncle = (); - type EventHandler = Module; + type EventHandler = Pallet; } parameter_types! 
{ pub const MinimumPeriod: u64 = 5; @@ -459,7 +458,7 @@ impl ExtBuilder { ext.execute_with(|| { System::set_block_number(1); Session::on_initialize(1); - Staking::on_initialize(1); + >::on_initialize(1); Timestamp::set_timestamp(INIT_TIMESTAMP); }); } @@ -610,7 +609,7 @@ pub(crate) fn run_to_block(n: BlockNumber) { for b in (System::block_number() + 1)..=n { System::set_block_number(b); Session::on_initialize(b); - Staking::on_initialize(b); + >::on_initialize(b); Timestamp::set_timestamp(System::block_number() * BLOCK_TIME + INIT_TIMESTAMP); if b != n { Staking::on_finalize(System::block_number()); @@ -696,7 +695,7 @@ pub(crate) fn reward_all_elected() { .into_iter() .map(|v| (v, 1)); - >::reward_by_ids(rewards) + >::reward_by_ids(rewards) } pub(crate) fn validator_controllers() -> Vec { @@ -714,7 +713,7 @@ pub(crate) fn on_offence_in_era( slash_fraction: &[Perbill], era: EraIndex, ) { - let bonded_eras = crate::BondedEras::get(); + let bonded_eras = crate::BondedEras::::get(); for &(bonded_era, start_session) in bonded_eras.iter() { if bonded_era == era { let _ = Staking::on_offence(offenders, slash_fraction, start_session); diff --git a/frame/staking/src/slashing.rs b/frame/staking/src/slashing.rs index fd0a63b288ab2..50cab1103b95a 100644 --- a/frame/staking/src/slashing.rs +++ b/frame/staking/src/slashing.rs @@ -50,12 +50,12 @@ //! 
Based on research at use super::{ - EraIndex, Config, Module, Store, BalanceOf, Exposure, Perbill, SessionInterface, + EraIndex, Config, Pallet, Store, BalanceOf, Exposure, Perbill, SessionInterface, NegativeImbalanceOf, UnappliedSlash, Error, }; use sp_runtime::{traits::{Zero, Saturating}, RuntimeDebug, DispatchResult}; use frame_support::{ - StorageMap, StorageDoubleMap, ensure, + ensure, traits::{Currency, OnUnbalanced, Imbalance}, }; use sp_std::vec::Vec; @@ -239,7 +239,7 @@ pub(crate) fn compute_slash(params: SlashParams) return None; } - let (prior_slash_p, _era_slash) = as Store>::ValidatorSlashInEra::get( + let (prior_slash_p, _era_slash) = as Store>::ValidatorSlashInEra::get( &slash_era, stash, ).unwrap_or((Perbill::zero(), Zero::zero())); @@ -247,7 +247,7 @@ pub(crate) fn compute_slash(params: SlashParams) // compare slash proportions rather than slash values to avoid issues due to rounding // error. if slash.deconstruct() > prior_slash_p.deconstruct() { - as Store>::ValidatorSlashInEra::insert( + as Store>::ValidatorSlashInEra::insert( &slash_era, stash, &(slash, own_slash), @@ -285,12 +285,12 @@ pub(crate) fn compute_slash(params: SlashParams) // chill the validator - it misbehaved in the current span and should // not continue in the next election. also end the slashing span. 
spans.end_span(now); - >::chill_stash(stash); + >::chill_stash(stash); // make sure to disable validator till the end of this session if T::SessionInterface::disable_validator(stash).unwrap_or(false) { // force a new era, to select a new validator set - >::ensure_new_era() + >::ensure_new_era() } } } @@ -325,12 +325,12 @@ fn kick_out_if_recent( if spans.era_span(params.slash_era).map(|s| s.index) == Some(spans.span_index()) { spans.end_span(params.now); - >::chill_stash(params.stash); + >::chill_stash(params.stash); // make sure to disable validator till the end of this session if T::SessionInterface::disable_validator(params.stash).unwrap_or(false) { // force a new era, to select a new validator set - >::ensure_new_era() + >::ensure_new_era() } } } @@ -367,14 +367,14 @@ fn slash_nominators( let own_slash_by_validator = slash * nominator.value; let own_slash_difference = own_slash_by_validator.saturating_sub(own_slash_prior); - let mut era_slash = as Store>::NominatorSlashInEra::get( + let mut era_slash = as Store>::NominatorSlashInEra::get( &slash_era, stash, ).unwrap_or_else(|| Zero::zero()); era_slash += own_slash_difference; - as Store>::NominatorSlashInEra::insert( + as Store>::NominatorSlashInEra::insert( &slash_era, stash, &era_slash, @@ -437,9 +437,9 @@ fn fetch_spans<'a, T: Config + 'a>( slash_of: &'a mut BalanceOf, reward_proportion: Perbill, ) -> InspectingSpans<'a, T> { - let spans = as Store>::SlashingSpans::get(stash).unwrap_or_else(|| { + let spans = as Store>::SlashingSpans::get(stash).unwrap_or_else(|| { let spans = SlashingSpans::new(window_start); - as Store>::SlashingSpans::insert(stash, &spans); + as Store>::SlashingSpans::insert(stash, &spans); spans }); @@ -488,7 +488,7 @@ impl<'a, T: 'a + Config> InspectingSpans<'a, T> { ) -> Option { let target_span = self.era_span(slash_era)?; let span_slash_key = (self.stash.clone(), target_span.index); - let mut span_record = as Store>::SpanSlash::get(&span_slash_key); + let mut span_record = as 
Store>::SpanSlash::get(&span_slash_key); let mut changed = false; let reward = if span_record.slashed < slash { @@ -519,7 +519,7 @@ impl<'a, T: 'a + Config> InspectingSpans<'a, T> { if changed { self.dirty = true; - as Store>::SpanSlash::insert(&span_slash_key, &span_record); + as Store>::SpanSlash::insert(&span_slash_key, &span_record); } Some(target_span.index) @@ -533,18 +533,18 @@ impl<'a, T: 'a + Config> Drop for InspectingSpans<'a, T> { if let Some((start, end)) = self.spans.prune(self.window_start) { for span_index in start..end { - as Store>::SpanSlash::remove(&(self.stash.clone(), span_index)); + as Store>::SpanSlash::remove(&(self.stash.clone(), span_index)); } } - as Store>::SlashingSpans::insert(self.stash, &self.spans); + as Store>::SlashingSpans::insert(self.stash, &self.spans); } } /// Clear slashing metadata for an obsolete era. pub(crate) fn clear_era_metadata(obsolete_era: EraIndex) { - as Store>::ValidatorSlashInEra::remove_prefix(&obsolete_era); - as Store>::NominatorSlashInEra::remove_prefix(&obsolete_era); + as Store>::ValidatorSlashInEra::remove_prefix(&obsolete_era); + as Store>::NominatorSlashInEra::remove_prefix(&obsolete_era); } /// Clear slashing metadata for a dead account. @@ -552,14 +552,14 @@ pub(crate) fn clear_stash_metadata( stash: &T::AccountId, num_slashing_spans: u32, ) -> DispatchResult { - let spans = match as Store>::SlashingSpans::get(stash) { + let spans = match as Store>::SlashingSpans::get(stash) { None => return Ok(()), Some(s) => s, }; ensure!(num_slashing_spans as usize >= spans.iter().count(), Error::::IncorrectSlashingSpans); - as Store>::SlashingSpans::remove(stash); + as Store>::SlashingSpans::remove(stash); // kill slashing-span metadata for account. // @@ -567,7 +567,7 @@ pub(crate) fn clear_stash_metadata( // in that case, they may re-bond, but it would count again as span 0. Further ancient // slashes would slash into this new bond, since metadata has now been cleared. 
for span in spans.iter() { - as Store>::SpanSlash::remove(&(stash.clone(), span.index)); + as Store>::SpanSlash::remove(&(stash.clone(), span.index)); } Ok(()) @@ -582,12 +582,12 @@ pub fn do_slash( reward_payout: &mut BalanceOf, slashed_imbalance: &mut NegativeImbalanceOf, ) { - let controller = match >::bonded(stash) { + let controller = match >::bonded(stash) { None => return, // defensive: should always exist. Some(c) => c, }; - let mut ledger = match >::ledger(&controller) { + let mut ledger = match >::ledger(&controller) { Some(ledger) => ledger, None => return, // nothing to do. }; @@ -603,11 +603,11 @@ pub fn do_slash( *reward_payout = reward_payout.saturating_sub(missing); } - >::update_ledger(&controller, &ledger); + >::update_ledger(&controller, &ledger); // trigger the event - >::deposit_event( - super::RawEvent::Slash(stash.clone(), value) + >::deposit_event( + super::Event::::Slash(stash.clone(), value) ); } } diff --git a/frame/staking/src/testing_utils.rs b/frame/staking/src/testing_utils.rs index c4daf88098e75..185b96983ab94 100644 --- a/frame/staking/src/testing_utils.rs +++ b/frame/staking/src/testing_utils.rs @@ -19,7 +19,7 @@ //! bonding validators, nominators, and generating different types of solutions. use crate::*; -use crate::Module as Staking; +use crate::Pallet as Staking; use frame_benchmarking::account; use frame_system::RawOrigin; use sp_io::hashing::blake2_256; @@ -166,12 +166,12 @@ pub fn create_validators_with_nominators_for_era( Staking::::nominate(RawOrigin::Signed(n_controller.clone()).into(), selected_validators)?; } - ValidatorCount::put(validators); + ValidatorCount::::put(validators); Ok(validator_chosen) } /// get the current era. 
pub fn current_era() -> EraIndex { - >::current_era().unwrap_or(0) + >::current_era().unwrap_or(0) } diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index ec5a61d46885b..4473e89585002 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -17,7 +17,7 @@ //! Tests for the module. -use super::*; +use super::{*, Event}; use mock::*; use sp_runtime::{ assert_eq_error_rate, @@ -25,7 +25,7 @@ use sp_runtime::{ }; use sp_staking::offence::OffenceDetails; use frame_support::{ - assert_ok, assert_noop, StorageMap, + assert_ok, assert_noop, traits::{Currency, ReservableCurrency, OnInitialize}, weights::{extract_actual_weight, GetDispatchInfo}, }; @@ -187,10 +187,10 @@ fn rewards_should_work() { Payee::::insert(21, RewardDestination::Controller); Payee::::insert(101, RewardDestination::Controller); - >::reward_by_ids(vec![(11, 50)]); - >::reward_by_ids(vec![(11, 50)]); + >::reward_by_ids(vec![(11, 50)]); + >::reward_by_ids(vec![(11, 50)]); // This is the second validator of the current elected set. - >::reward_by_ids(vec![(21, 50)]); + >::reward_by_ids(vec![(21, 50)]); // Compute total payout now for whole duration of the session. 
let total_payout_0 = current_total_payout_for_duration(reward_time_per_era()); @@ -227,7 +227,7 @@ fn rewards_should_work() { ); assert_eq!( *mock::staking_events().last().unwrap(), - RawEvent::EraPayout(0, total_payout_0, maximum_payout - total_payout_0) + Event::EraPayout(0, total_payout_0, maximum_payout - total_payout_0) ); mock::make_all_reward_payment(0); @@ -253,7 +253,7 @@ fn rewards_should_work() { assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); assert_eq_uvec!(Session::validators(), vec![11, 21]); - >::reward_by_ids(vec![(11, 1)]); + >::reward_by_ids(vec![(11, 1)]); // Compute total payout now for whole duration as other parameter won't change let total_payout_1 = current_total_payout_for_duration(reward_time_per_era()); @@ -265,7 +265,7 @@ fn rewards_should_work() { ); assert_eq!( *mock::staking_events().last().unwrap(), - RawEvent::EraPayout(1, total_payout_1, maximum_payout - total_payout_1) + Event::EraPayout(1, total_payout_1, maximum_payout - total_payout_1) ); mock::make_all_reward_payment(1); @@ -482,8 +482,8 @@ fn nominating_and_rewards_should_work() { // the total reward for era 0 let total_payout_0 = current_total_payout_for_duration(reward_time_per_era()); - >::reward_by_ids(vec![(41, 1)]); - >::reward_by_ids(vec![(31, 1)]); + >::reward_by_ids(vec![(41, 1)]); + >::reward_by_ids(vec![(31, 1)]); mock::start_active_era(1); @@ -524,8 +524,8 @@ fn nominating_and_rewards_should_work() { // the total reward for era 1 let total_payout_1 = current_total_payout_for_duration(reward_time_per_era()); - >::reward_by_ids(vec![(21, 2)]); - >::reward_by_ids(vec![(11, 1)]); + >::reward_by_ids(vec![(21, 2)]); + >::reward_by_ids(vec![(11, 1)]); mock::start_active_era(2); @@ -779,7 +779,7 @@ fn forcing_new_era_works() { assert_eq!(active_era(), 1); // no era change. 
- ForceEra::put(Forcing::ForceNone); + ForceEra::::put(Forcing::ForceNone); start_session(4); assert_eq!(active_era(), 1); @@ -795,7 +795,7 @@ fn forcing_new_era_works() { // back to normal. // this immediately starts a new session. - ForceEra::put(Forcing::NotForcing); + ForceEra::::put(Forcing::NotForcing); start_session(8); assert_eq!(active_era(), 1); @@ -803,7 +803,7 @@ fn forcing_new_era_works() { start_session(9); assert_eq!(active_era(), 2); // forceful change - ForceEra::put(Forcing::ForceAlways); + ForceEra::::put(Forcing::ForceAlways); start_session(10); assert_eq!(active_era(), 2); @@ -815,10 +815,10 @@ fn forcing_new_era_works() { assert_eq!(active_era(), 4); // just one forceful change - ForceEra::put(Forcing::ForceNew); + ForceEra::::put(Forcing::ForceNew); start_session(13); assert_eq!(active_era(), 5); - assert_eq!(ForceEra::get(), Forcing::NotForcing); + assert_eq!(ForceEra::::get(), Forcing::NotForcing); start_session(14); assert_eq!(active_era(), 6); @@ -917,7 +917,7 @@ fn reward_destination_works() { // Compute total payout now for whole duration as other parameter won't change let total_payout_0 = current_total_payout_for_duration(reward_time_per_era()); - >::reward_by_ids(vec![(11, 1)]); + >::reward_by_ids(vec![(11, 1)]); mock::start_active_era(1); mock::make_all_reward_payment(0); @@ -940,7 +940,7 @@ fn reward_destination_works() { // Compute total payout now for whole duration as other parameter won't change let total_payout_1 = current_total_payout_for_duration(reward_time_per_era()); - >::reward_by_ids(vec![(11, 1)]); + >::reward_by_ids(vec![(11, 1)]); mock::start_active_era(2); mock::make_all_reward_payment(1); @@ -968,7 +968,7 @@ fn reward_destination_works() { // Compute total payout now for whole duration as other parameter won't change let total_payout_2 = current_total_payout_for_duration(reward_time_per_era()); - >::reward_by_ids(vec![(11, 1)]); + >::reward_by_ids(vec![(11, 1)]); mock::start_active_era(3); 
mock::make_all_reward_payment(2); @@ -1015,7 +1015,7 @@ fn validator_payment_prefs_work() { // Compute total payout now for whole duration as other parameter won't change let total_payout_1 = current_total_payout_for_duration(reward_time_per_era()); let exposure_1 = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); - >::reward_by_ids(vec![(11, 1)]); + >::reward_by_ids(vec![(11, 1)]); mock::start_active_era(2); mock::make_all_reward_payment(1); @@ -1508,8 +1508,8 @@ fn reward_to_stake_works() { // Compute total payout now for whole duration as other parameter won't change let total_payout_0 = current_total_payout_for_duration(reward_time_per_era()); - >::reward_by_ids(vec![(11, 1)]); - >::reward_by_ids(vec![(21, 1)]); + >::reward_by_ids(vec![(11, 1)]); + >::reward_by_ids(vec![(21, 1)]); // New era --> rewards are paid --> stakes are changed mock::start_active_era(1); @@ -2009,10 +2009,10 @@ fn reward_from_authorship_event_handler_works() { assert_eq!(>::author(), 11); - >::note_author(11); - >::note_uncle(21, 1); + >::note_author(11); + >::note_uncle(21, 1); // Rewarding the same two times works. 
- >::note_uncle(11, 1); + >::note_uncle(11, 1); // Not mandatory but must be coherent with rewards assert_eq_uvec!(Session::validators(), vec![11, 21]); @@ -2035,13 +2035,13 @@ fn add_reward_points_fns_works() { // Not mandatory but must be coherent with rewards assert_eq_uvec!(Session::validators(), vec![21, 11]); - >::reward_by_ids(vec![ + >::reward_by_ids(vec![ (21, 1), (11, 1), (11, 1), ]); - >::reward_by_ids(vec![ + >::reward_by_ids(vec![ (21, 1), (11, 1), (11, 1), @@ -2084,7 +2084,7 @@ fn era_is_always_same_length() { assert_eq!(Staking::eras_start_session_index(current_era()).unwrap(), session_per_era * 2u32); let session = Session::current_index(); - ForceEra::put(Forcing::ForceNew); + ForceEra::::put(Forcing::ForceNew); advance_session(); advance_session(); assert_eq!(current_era(), 3); @@ -2992,13 +2992,13 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { Payee::::insert(11, RewardDestination::Controller); Payee::::insert(101, RewardDestination::Controller); - >::reward_by_ids(vec![(11, 1)]); + >::reward_by_ids(vec![(11, 1)]); // Compute total payout now for whole duration as other parameter won't change let total_payout_0 = current_total_payout_for_duration(reward_time_per_era()); mock::start_active_era(1); - >::reward_by_ids(vec![(11, 1)]); + >::reward_by_ids(vec![(11, 1)]); // Change total issuance in order to modify total payout let _ = Balances::deposit_creating(&999, 1_000_000_000); // Compute total payout now for whole duration as other parameter won't change @@ -3007,7 +3007,7 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { mock::start_active_era(2); - >::reward_by_ids(vec![(11, 1)]); + >::reward_by_ids(vec![(11, 1)]); // Change total issuance in order to modify total payout let _ = Balances::deposit_creating(&999, 1_000_000_000); // Compute total payout now for whole duration as other parameter won't change @@ -3168,7 +3168,7 @@ fn 
test_max_nominator_rewarded_per_validator_and_cant_steal_someone_else_reward( } mock::start_active_era(1); - >::reward_by_ids(vec![(11, 1)]); + >::reward_by_ids(vec![(11, 1)]); // compute and ensure the reward amount is greater than zero. let _ = current_total_payout_for_duration(reward_time_per_era()); @@ -3832,7 +3832,7 @@ fn do_not_die_when_active_is_ed() { fn on_finalize_weight_is_nonzero() { ExtBuilder::default().build_and_execute(|| { let on_finalize_weight = ::DbWeight::get().reads(1); - assert!(Staking::on_initialize(1) >= on_finalize_weight); + assert!(>::on_initialize(1) >= on_finalize_weight); }) } @@ -3954,7 +3954,7 @@ mod election_data_provider { assert_eq!(staking_events().len(), 1); assert_eq!( *staking_events().last().unwrap(), - RawEvent::StakingElection + Event::StakingElection ); for b in 21..45 { @@ -3968,7 +3968,7 @@ mod election_data_provider { assert_eq!(staking_events().len(), 3); assert_eq!( *staking_events().last().unwrap(), - RawEvent::StakingElection + Event::StakingElection ); }) } From 3325b100195539427e6b828f7ea4a36341df2c8e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sat, 12 Jun 2021 01:38:17 +0100 Subject: [PATCH 33/61] pallet-offences: Switch to partition_point (#9049) This changes the code to use `partition_point` instead of `binary_search_by_key`, because this was very likely the problematic pallet 2 weeks ago on polkadot. --- frame/offences/src/lib.rs | 11 ++----- frame/offences/src/mock.rs | 5 +++ frame/offences/src/tests.rs | 61 ++++++++++++++++++++++++++++++++++++- 3 files changed, 68 insertions(+), 9 deletions(-) diff --git a/frame/offences/src/lib.rs b/frame/offences/src/lib.rs index 82665099d65db..1076dd615496d 100644 --- a/frame/offences/src/lib.rs +++ b/frame/offences/src/lib.rs @@ -281,15 +281,10 @@ impl> ReportIndexStorage { fn insert(&mut self, time_slot: &O::TimeSlot, report_id: ReportIdOf) { // Insert the report id into the list while maintaining the ordering by the time // slot. 
- let pos = match self + let pos = self .same_kind_reports - .binary_search_by_key(&time_slot, |&(ref when, _)| when) - { - Ok(pos) => pos, - Err(pos) => pos, - }; - self.same_kind_reports - .insert(pos, (time_slot.clone(), report_id)); + .partition_point(|&(ref when, _)| when <= time_slot); + self.same_kind_reports.insert(pos, (time_slot.clone(), report_id)); // Update the list of concurrent reports. self.concurrent_reports.push(report_id); diff --git a/frame/offences/src/mock.rs b/frame/offences/src/mock.rs index e7655d7ee29a7..a494ab02ebbd1 100644 --- a/frame/offences/src/mock.rs +++ b/frame/offences/src/mock.rs @@ -170,3 +170,8 @@ impl offence::Offence for Offence { Perbill::from_percent(5 + offenders_count * 100 / validator_set_count) } } + +/// Create the report id for the given `offender` and `time_slot` combination. +pub fn report_id(time_slot: u128, offender: u64) -> H256 { + Offences::report_id::>(&time_slot, &offender) +} diff --git a/frame/offences/src/tests.rs b/frame/offences/src/tests.rs index edc22cb239c44..d2e0f2d63d550 100644 --- a/frame/offences/src/tests.rs +++ b/frame/offences/src/tests.rs @@ -22,7 +22,7 @@ use super::*; use crate::mock::{ Offences, System, Offence, Event, KIND, new_test_ext, with_on_offence_fractions, - offence_reports, + offence_reports, report_id, }; use sp_runtime::Perbill; use frame_system::{EventRecord, Phase}; @@ -284,3 +284,62 @@ fn should_properly_count_offences() { ); }); } + +/// We insert offences in sorted order using the time slot in the `same_kind_reports`. +/// This test ensures that it works as expected. 
+#[test] +fn should_properly_sort_offences() { + new_test_ext().execute_with(|| { + // given + let time_slot = 42; + assert_eq!(offence_reports(KIND, time_slot), vec![]); + + let offence1 = Offence { + validator_set_count: 5, + time_slot, + offenders: vec![5], + }; + let offence2 = Offence { + validator_set_count: 5, + time_slot, + offenders: vec![4], + }; + let offence3 = Offence { + validator_set_count: 5, + time_slot: time_slot + 1, + offenders: vec![6, 7], + }; + let offence4 = Offence { + validator_set_count: 5, + time_slot: time_slot - 1, + offenders: vec![3], + }; + Offences::report_offence(vec![], offence1).unwrap(); + with_on_offence_fractions(|f| { + assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); + f.clear(); + }); + + // when + // report for the second time + Offences::report_offence(vec![], offence2).unwrap(); + Offences::report_offence(vec![], offence3).unwrap(); + Offences::report_offence(vec![], offence4).unwrap(); + + // then + let same_kind_reports = + Vec::<(u128, sp_core::H256)>::decode( + &mut &crate::ReportsByKindIndex::::get(KIND)[..], + ).unwrap(); + assert_eq!( + same_kind_reports, + vec![ + (time_slot - 1, report_id(time_slot - 1, 3)), + (time_slot, report_id(time_slot, 5)), + (time_slot, report_id(time_slot, 4)), + (time_slot + 1, report_id(time_slot + 1, 6)), + (time_slot + 1, report_id(time_slot + 1, 7)), + ] + ); + }); +} From 6d82c02611c51a5c54d60e02c3c104b10aa3ae71 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Sat, 12 Jun 2021 02:47:22 +0200 Subject: [PATCH 34/61] disable unused schnorrkel feature (#9084) --- Cargo.lock | 1 - primitives/core/Cargo.toml | 1 - primitives/keystore/Cargo.toml | 1 - primitives/keystore/src/vrf.rs | 1 - 4 files changed, 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4572ed354ab14..a52f4250b5a64 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8247,7 +8247,6 @@ dependencies = [ "merlin", "rand 0.7.3", "rand_core 0.5.1", - "serde", "sha2 0.8.2", "subtle 2.4.0", "zeroize", diff --git 
a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 831e62d6f9521..0c724d61ae0cc 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -101,7 +101,6 @@ std = [ "rand", "sha2/std", "schnorrkel/std", - "schnorrkel/serde", "regex", "num-traits/std", "tiny-keccak", diff --git a/primitives/keystore/Cargo.toml b/primitives/keystore/Cargo.toml index 81404ce344a21..d4ebfc1c74c60 100644 --- a/primitives/keystore/Cargo.toml +++ b/primitives/keystore/Cargo.toml @@ -34,5 +34,4 @@ default = ["std"] std = [ "serde", "schnorrkel/std", - "schnorrkel/serde", ] diff --git a/primitives/keystore/src/vrf.rs b/primitives/keystore/src/vrf.rs index 463a565f9d86c..04286eea82761 100644 --- a/primitives/keystore/src/vrf.rs +++ b/primitives/keystore/src/vrf.rs @@ -40,7 +40,6 @@ pub struct VRFTranscriptData { pub items: Vec<(&'static str, VRFTranscriptValue)>, } /// VRF signature data -#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct VRFSignature { /// The VRFOutput serialized pub output: VRFOutput, From 2c210396dbf67968a5f631912fa4addefdc8d11a Mon Sep 17 00:00:00 2001 From: Alexander Popiak Date: Sat, 12 Jun 2021 12:43:08 +0200 Subject: [PATCH 35/61] make all extrinsics public so they are available from outside (#9078) Co-authored-by: thiolliere --- frame/assets/src/lib.rs | 46 +++++++++++++------------- frame/atomic-swap/src/lib.rs | 6 ++-- frame/balances/src/lib.rs | 2 +- frame/democracy/src/lib.rs | 48 ++++++++++++++-------------- frame/elections-phragmen/src/lib.rs | 12 +++---- frame/example/src/lib.rs | 4 +-- frame/grandpa/src/lib.rs | 2 +- frame/identity/src/lib.rs | 30 ++++++++--------- frame/indices/src/lib.rs | 10 +++--- frame/lottery/src/lib.rs | 8 ++--- frame/multisig/src/lib.rs | 8 ++--- frame/nicks/src/lib.rs | 8 ++--- frame/proxy/src/lib.rs | 20 ++++++------ frame/recovery/src/lib.rs | 18 +++++------ frame/scheduler/src/lib.rs | 12 +++---- frame/sudo/src/lib.rs | 8 ++--- frame/support/src/lib.rs | 2 +- 
frame/system/src/lib.rs | 14 ++++---- frame/timestamp/src/lib.rs | 2 +- frame/transaction-storage/src/lib.rs | 6 ++-- frame/uniques/src/lib.rs | 44 ++++++++++++------------- 21 files changed, 155 insertions(+), 155 deletions(-) diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index 333dbad836462..afcdb5b054d04 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -368,7 +368,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::create())] - pub(super) fn create( + pub fn create( origin: OriginFor, #[pallet::compact] id: T::AssetId, admin: ::Source, @@ -424,7 +424,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::force_create())] - pub(super) fn force_create( + pub fn force_create( origin: OriginFor, #[pallet::compact] id: T::AssetId, owner: ::Source, @@ -477,7 +477,7 @@ pub mod pallet { witness.sufficients, witness.approvals, ))] - pub(super) fn destroy( + pub fn destroy( origin: OriginFor, #[pallet::compact] id: T::AssetId, witness: DestroyWitness, @@ -528,7 +528,7 @@ pub mod pallet { /// Weight: `O(1)` /// Modes: Pre-existing balance of `beneficiary`; Account pre-existence of `beneficiary`. #[pallet::weight(T::WeightInfo::mint())] - pub(super) fn mint( + pub fn mint( origin: OriginFor, #[pallet::compact] id: T::AssetId, beneficiary: ::Source, @@ -556,7 +556,7 @@ pub mod pallet { /// Weight: `O(1)` /// Modes: Post-existence of `who`; Pre & post Zombie-status of `who`. #[pallet::weight(T::WeightInfo::burn())] - pub(super) fn burn( + pub fn burn( origin: OriginFor, #[pallet::compact] id: T::AssetId, who: ::Source, @@ -589,7 +589,7 @@ pub mod pallet { /// Modes: Pre-existence of `target`; Post-existence of sender; Account pre-existence of /// `target`. 
#[pallet::weight(T::WeightInfo::transfer())] - pub(super) fn transfer( + pub fn transfer( origin: OriginFor, #[pallet::compact] id: T::AssetId, target: ::Source, @@ -625,7 +625,7 @@ pub mod pallet { /// Modes: Pre-existence of `target`; Post-existence of sender; Account pre-existence of /// `target`. #[pallet::weight(T::WeightInfo::transfer_keep_alive())] - pub(super) fn transfer_keep_alive( + pub fn transfer_keep_alive( origin: OriginFor, #[pallet::compact] id: T::AssetId, target: ::Source, @@ -662,7 +662,7 @@ pub mod pallet { /// Modes: Pre-existence of `dest`; Post-existence of `source`; Account pre-existence of /// `dest`. #[pallet::weight(T::WeightInfo::force_transfer())] - pub(super) fn force_transfer( + pub fn force_transfer( origin: OriginFor, #[pallet::compact] id: T::AssetId, source: ::Source, @@ -692,7 +692,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::freeze())] - pub(super) fn freeze( + pub fn freeze( origin: OriginFor, #[pallet::compact] id: T::AssetId, who: ::Source @@ -724,7 +724,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::thaw())] - pub(super) fn thaw( + pub fn thaw( origin: OriginFor, #[pallet::compact] id: T::AssetId, @@ -756,7 +756,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::freeze_asset())] - pub(super) fn freeze_asset( + pub fn freeze_asset( origin: OriginFor, #[pallet::compact] id: T::AssetId ) -> DispatchResult { @@ -783,7 +783,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::thaw_asset())] - pub(super) fn thaw_asset( + pub fn thaw_asset( origin: OriginFor, #[pallet::compact] id: T::AssetId ) -> DispatchResult { @@ -811,7 +811,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::transfer_ownership())] - pub(super) fn transfer_ownership( + pub fn transfer_ownership( origin: OriginFor, #[pallet::compact] id: T::AssetId, owner: ::Source, @@ -852,7 +852,7 @@ pub mod pallet { /// /// Weight: `O(1)` 
#[pallet::weight(T::WeightInfo::set_team())] - pub(super) fn set_team( + pub fn set_team( origin: OriginFor, #[pallet::compact] id: T::AssetId, issuer: ::Source, @@ -894,7 +894,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::set_metadata(name.len() as u32, symbol.len() as u32))] - pub(super) fn set_metadata( + pub fn set_metadata( origin: OriginFor, #[pallet::compact] id: T::AssetId, name: Vec, @@ -957,7 +957,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::clear_metadata())] - pub(super) fn clear_metadata( + pub fn clear_metadata( origin: OriginFor, #[pallet::compact] id: T::AssetId, ) -> DispatchResult { @@ -989,7 +989,7 @@ pub mod pallet { /// /// Weight: `O(N + S)` where N and S are the length of the name and symbol respectively. #[pallet::weight(T::WeightInfo::force_set_metadata(name.len() as u32, symbol.len() as u32))] - pub(super) fn force_set_metadata( + pub fn force_set_metadata( origin: OriginFor, #[pallet::compact] id: T::AssetId, name: Vec, @@ -1037,7 +1037,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::force_clear_metadata())] - pub(super) fn force_clear_metadata( + pub fn force_clear_metadata( origin: OriginFor, #[pallet::compact] id: T::AssetId, ) -> DispatchResult { @@ -1075,7 +1075,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::force_asset_status())] - pub(super) fn force_asset_status( + pub fn force_asset_status( origin: OriginFor, #[pallet::compact] id: T::AssetId, owner: ::Source, @@ -1125,7 +1125,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::approve_transfer())] - pub(super) fn approve_transfer( + pub fn approve_transfer( origin: OriginFor, #[pallet::compact] id: T::AssetId, delegate: ::Source, @@ -1164,7 +1164,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::cancel_approval())] - pub(super) fn cancel_approval( + pub fn cancel_approval( origin: OriginFor, 
#[pallet::compact] id: T::AssetId, delegate: ::Source, @@ -1192,7 +1192,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::force_cancel_approval())] - pub(super) fn force_cancel_approval( + pub fn force_cancel_approval( origin: OriginFor, #[pallet::compact] id: T::AssetId, owner: ::Source, @@ -1236,7 +1236,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::transfer_approved())] - pub(super) fn transfer_approved( + pub fn transfer_approved( origin: OriginFor, #[pallet::compact] id: T::AssetId, owner: ::Source, diff --git a/frame/atomic-swap/src/lib.rs b/frame/atomic-swap/src/lib.rs index afc74dd2a549e..4c19a61bb72f3 100644 --- a/frame/atomic-swap/src/lib.rs +++ b/frame/atomic-swap/src/lib.rs @@ -226,7 +226,7 @@ pub mod pallet { /// that the revealer uses a shorter duration than the counterparty, to prevent the /// situation where the revealer reveals the proof too late around the end block. #[pallet::weight(T::DbWeight::get().reads_writes(1, 1).saturating_add(40_000_000))] - pub(crate) fn create_swap( + pub fn create_swap( origin: OriginFor, target: T::AccountId, hashed_proof: HashedProof, @@ -268,7 +268,7 @@ pub mod pallet { .saturating_add((proof.len() as Weight).saturating_mul(100)) .saturating_add(action.weight()) )] - pub(crate) fn claim_swap( + pub fn claim_swap( origin: OriginFor, proof: Vec, action: T::SwapAction, @@ -303,7 +303,7 @@ pub mod pallet { /// - `target`: Target of the original atomic swap. /// - `hashed_proof`: Hashed proof of the original atomic swap. 
#[pallet::weight(T::DbWeight::get().reads_writes(1, 1).saturating_add(40_000_000))] - pub(crate) fn cancel_swap( + pub fn cancel_swap( origin: OriginFor, target: T::AccountId, hashed_proof: HashedProof, diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index 105c5d08a659c..5dccd7da267f2 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -291,7 +291,7 @@ pub mod pallet { T::WeightInfo::set_balance_creating() // Creates a new account. .max(T::WeightInfo::set_balance_killing()) // Kills an existing account. )] - pub(super) fn set_balance( + pub fn set_balance( origin: OriginFor, who: ::Source, #[pallet::compact] new_free: T::Balance, diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index 70b943bf00d54..6ebe917f56ae5 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -634,7 +634,7 @@ pub mod pallet { /// /// Weight: `O(p)` #[pallet::weight(T::WeightInfo::propose())] - pub(crate) fn propose( + pub fn propose( origin: OriginFor, proposal_hash: T::Hash, #[pallet::compact] value: BalanceOf, @@ -675,7 +675,7 @@ pub mod pallet { /// /// Weight: `O(S)` where S is the number of seconds a proposal already has. #[pallet::weight(T::WeightInfo::second(*seconds_upper_bound))] - pub(crate) fn second( + pub fn second( origin: OriginFor, #[pallet::compact] proposal: PropIndex, #[pallet::compact] seconds_upper_bound: u32, @@ -706,7 +706,7 @@ pub mod pallet { T::WeightInfo::vote_new(T::MaxVotes::get()) .max(T::WeightInfo::vote_existing(T::MaxVotes::get())) )] - pub(crate) fn vote( + pub fn vote( origin: OriginFor, #[pallet::compact] ref_index: ReferendumIndex, vote: AccountVote>, @@ -724,7 +724,7 @@ pub mod pallet { /// /// Weight: `O(1)`. 
#[pallet::weight((T::WeightInfo::emergency_cancel(), DispatchClass::Operational))] - pub(crate) fn emergency_cancel(origin: OriginFor, ref_index: ReferendumIndex) -> DispatchResult { + pub fn emergency_cancel(origin: OriginFor, ref_index: ReferendumIndex) -> DispatchResult { T::CancellationOrigin::ensure_origin(origin)?; let status = Self::referendum_status(ref_index)?; @@ -746,7 +746,7 @@ pub mod pallet { /// Weight: `O(V)` with V number of vetoers in the blacklist of proposal. /// Decoding vec of length V. Charged as maximum #[pallet::weight(T::WeightInfo::external_propose(MAX_VETOERS))] - pub(crate) fn external_propose(origin: OriginFor, proposal_hash: T::Hash) -> DispatchResult { + pub fn external_propose(origin: OriginFor, proposal_hash: T::Hash) -> DispatchResult { T::ExternalOrigin::ensure_origin(origin)?; ensure!(!>::exists(), Error::::DuplicateProposal); if let Some((until, _)) = >::get(proposal_hash) { @@ -771,7 +771,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::external_propose_majority())] - pub(crate) fn external_propose_majority( + pub fn external_propose_majority( origin: OriginFor, proposal_hash: T::Hash, ) -> DispatchResult { @@ -792,7 +792,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::external_propose_default())] - pub(crate) fn external_propose_default( + pub fn external_propose_default( origin: OriginFor, proposal_hash: T::Hash, ) -> DispatchResult { @@ -817,7 +817,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::fast_track())] - pub(crate) fn fast_track( + pub fn fast_track( origin: OriginFor, proposal_hash: T::Hash, voting_period: T::BlockNumber, @@ -864,7 +864,7 @@ pub mod pallet { /// /// Weight: `O(V + log(V))` where V is number of `existing vetoers` #[pallet::weight(T::WeightInfo::veto_external(MAX_VETOERS))] - pub(crate) fn veto_external(origin: OriginFor, proposal_hash: T::Hash) -> DispatchResult { + pub fn veto_external(origin: OriginFor, 
proposal_hash: T::Hash) -> DispatchResult { let who = T::VetoOrigin::ensure_origin(origin)?; if let Some((e_proposal_hash, _)) = >::get() { @@ -896,7 +896,7 @@ pub mod pallet { /// /// # Weight: `O(1)`. #[pallet::weight(T::WeightInfo::cancel_referendum())] - pub(crate) fn cancel_referendum( + pub fn cancel_referendum( origin: OriginFor, #[pallet::compact] ref_index: ReferendumIndex, ) -> DispatchResult { @@ -913,7 +913,7 @@ pub mod pallet { /// /// Weight: `O(D)` where `D` is the items in the dispatch queue. Weighted as `D = 10`. #[pallet::weight((T::WeightInfo::cancel_queued(10), DispatchClass::Operational))] - pub(crate) fn cancel_queued(origin: OriginFor, which: ReferendumIndex) -> DispatchResult { + pub fn cancel_queued(origin: OriginFor, which: ReferendumIndex) -> DispatchResult { ensure_root(origin)?; T::Scheduler::cancel_named((DEMOCRACY_ID, which).encode()) .map_err(|_| Error::::ProposalMissing)?; @@ -970,7 +970,7 @@ pub mod pallet { // NOTE: weight must cover an incorrect voting of origin with max votes, this is ensure // because a valid delegation cover decoding a direct voting with max votes. #[pallet::weight(T::WeightInfo::undelegate(T::MaxVotes::get().into()))] - pub(crate) fn undelegate(origin: OriginFor) -> DispatchResultWithPostInfo { + pub fn undelegate(origin: OriginFor) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; let votes = Self::try_undelegate(who)?; Ok(Some(T::WeightInfo::undelegate(votes)).into()) @@ -982,7 +982,7 @@ pub mod pallet { /// /// Weight: `O(1)`. #[pallet::weight(T::WeightInfo::clear_public_proposals())] - pub(crate) fn clear_public_proposals(origin: OriginFor) -> DispatchResult { + pub fn clear_public_proposals(origin: OriginFor) -> DispatchResult { ensure_root(origin)?; >::kill(); Ok(()) @@ -999,7 +999,7 @@ pub mod pallet { /// /// Weight: `O(E)` with E size of `encoded_proposal` (protected by a required deposit). 
#[pallet::weight(T::WeightInfo::note_preimage(encoded_proposal.len() as u32))] - pub(crate) fn note_preimage(origin: OriginFor, encoded_proposal: Vec) -> DispatchResult { + pub fn note_preimage(origin: OriginFor, encoded_proposal: Vec) -> DispatchResult { Self::note_preimage_inner(ensure_signed(origin)?, encoded_proposal)?; Ok(()) } @@ -1009,7 +1009,7 @@ pub mod pallet { T::WeightInfo::note_preimage(encoded_proposal.len() as u32), DispatchClass::Operational, ))] - pub(crate) fn note_preimage_operational( + pub fn note_preimage_operational( origin: OriginFor, encoded_proposal: Vec, ) -> DispatchResult { @@ -1031,7 +1031,7 @@ pub mod pallet { /// /// Weight: `O(E)` with E size of `encoded_proposal` (protected by a required deposit). #[pallet::weight(T::WeightInfo::note_imminent_preimage(encoded_proposal.len() as u32))] - pub(crate) fn note_imminent_preimage( + pub fn note_imminent_preimage( origin: OriginFor, encoded_proposal: Vec, ) -> DispatchResultWithPostInfo { @@ -1046,7 +1046,7 @@ pub mod pallet { T::WeightInfo::note_imminent_preimage(encoded_proposal.len() as u32), DispatchClass::Operational, ))] - pub(crate) fn note_imminent_preimage_operational( + pub fn note_imminent_preimage_operational( origin: OriginFor, encoded_proposal: Vec, ) -> DispatchResultWithPostInfo { @@ -1073,7 +1073,7 @@ pub mod pallet { /// /// Weight: `O(D)` where D is length of proposal. 
#[pallet::weight(T::WeightInfo::reap_preimage(*proposal_len_upper_bound))] - pub(crate) fn reap_preimage( + pub fn reap_preimage( origin: OriginFor, proposal_hash: T::Hash, #[pallet::compact] proposal_len_upper_bound: u32, @@ -1116,7 +1116,7 @@ pub mod pallet { T::WeightInfo::unlock_set(T::MaxVotes::get()) .max(T::WeightInfo::unlock_remove(T::MaxVotes::get())) )] - pub(crate) fn unlock(origin: OriginFor, target: T::AccountId) -> DispatchResult { + pub fn unlock(origin: OriginFor, target: T::AccountId) -> DispatchResult { ensure_signed(origin)?; Self::update_lock(&target); Ok(()) @@ -1150,7 +1150,7 @@ pub mod pallet { /// Weight: `O(R + log R)` where R is the number of referenda that `target` has voted on. /// Weight is calculated for the maximum number of vote. #[pallet::weight(T::WeightInfo::remove_vote(T::MaxVotes::get()))] - pub(crate) fn remove_vote(origin: OriginFor, index: ReferendumIndex) -> DispatchResult { + pub fn remove_vote(origin: OriginFor, index: ReferendumIndex) -> DispatchResult { let who = ensure_signed(origin)?; Self::try_remove_vote(&who, index, UnvoteScope::Any) } @@ -1171,7 +1171,7 @@ pub mod pallet { /// Weight: `O(R + log R)` where R is the number of referenda that `target` has voted on. /// Weight is calculated for the maximum number of vote. #[pallet::weight(T::WeightInfo::remove_other_vote(T::MaxVotes::get()))] - pub(crate) fn remove_other_vote( + pub fn remove_other_vote( origin: OriginFor, target: T::AccountId, index: ReferendumIndex, @@ -1184,7 +1184,7 @@ pub mod pallet { /// Enact a proposal from a referendum. For now we just make the weight be the maximum. #[pallet::weight(T::BlockWeights::get().max_block)] - pub(crate) fn enact_proposal( + pub fn enact_proposal( origin: OriginFor, proposal_hash: T::Hash, index: ReferendumIndex, @@ -1209,7 +1209,7 @@ pub mod pallet { /// Weight: `O(p)` (though as this is an high-privilege dispatch, we assume it has a /// reasonable value). 
#[pallet::weight((T::WeightInfo::blacklist(T::MaxProposals::get()), DispatchClass::Operational))] - pub(crate) fn blacklist(origin: OriginFor, + pub fn blacklist(origin: OriginFor, proposal_hash: T::Hash, maybe_ref_index: Option, ) -> DispatchResult { @@ -1257,7 +1257,7 @@ pub mod pallet { /// /// Weight: `O(p)` where `p = PublicProps::::decode_len()` #[pallet::weight(T::WeightInfo::cancel_proposal(T::MaxProposals::get()))] - pub(crate) fn cancel_proposal( + pub fn cancel_proposal( origin: OriginFor, #[pallet::compact] prop_index: PropIndex, ) -> DispatchResult { diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index 556c57eea5a10..8a1680633ef7b 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -286,7 +286,7 @@ pub mod pallet { .max(T::WeightInfo::vote_less(votes.len() as u32)) .max(T::WeightInfo::vote_equal(votes.len() as u32)) )] - pub(crate) fn vote( + pub fn vote( origin: OriginFor, votes: Vec, #[pallet::compact] value: BalanceOf, @@ -349,7 +349,7 @@ pub mod pallet { /// /// The dispatch origin of this call must be signed and be a voter. #[pallet::weight(T::WeightInfo::remove_voter())] - pub(crate) fn remove_voter(origin: OriginFor) -> DispatchResultWithPostInfo { + pub fn remove_voter(origin: OriginFor) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; ensure!(Self::is_voter(&who), Error::::MustBeVoter); Self::do_remove_voter(&who); @@ -372,7 +372,7 @@ pub mod pallet { /// The number of current candidates must be provided as witness data. 
/// # #[pallet::weight(T::WeightInfo::submit_candidacy(*candidate_count))] - pub(crate) fn submit_candidacy( + pub fn submit_candidacy( origin: OriginFor, #[pallet::compact] candidate_count: u32, ) -> DispatchResultWithPostInfo { @@ -415,7 +415,7 @@ pub mod pallet { Renouncing::Member => T::WeightInfo::renounce_candidacy_members(), Renouncing::RunnerUp => T::WeightInfo::renounce_candidacy_runners_up(), })] - pub(crate) fn renounce_candidacy( + pub fn renounce_candidacy( origin: OriginFor, renouncing: Renouncing, ) -> DispatchResultWithPostInfo { @@ -476,7 +476,7 @@ pub mod pallet { } else { T::BlockWeights::get().max_block })] - pub(crate) fn remove_member( + pub fn remove_member( origin: OriginFor, who: ::Source, has_replacement: bool, @@ -516,7 +516,7 @@ pub mod pallet { /// The total number of voters and those that are defunct must be provided as witness data. /// # #[pallet::weight(T::WeightInfo::clean_defunct_voters(*_num_voters, *_num_defunct))] - pub(crate) fn clean_defunct_voters( + pub fn clean_defunct_voters( origin: OriginFor, _num_voters: u32, _num_defunct: u32, diff --git a/frame/example/src/lib.rs b/frame/example/src/lib.rs index fd1bc292ac8aa..f5014b75640ba 100644 --- a/frame/example/src/lib.rs +++ b/frame/example/src/lib.rs @@ -488,7 +488,7 @@ pub mod pallet { #[pallet::weight( ::WeightInfo::accumulate_dummy((*increase_by).saturated_into()) )] - pub(super) fn accumulate_dummy( + pub fn accumulate_dummy( origin: OriginFor, increase_by: T::Balance ) -> DispatchResult { @@ -533,7 +533,7 @@ pub mod pallet { // The weight for this extrinsic we use our own weight object `WeightForSetDummy` to determine // its weight #[pallet::weight(WeightForSetDummy::(>::from(100u32)))] - pub(super) fn set_dummy( + pub fn set_dummy( origin: OriginFor, #[pallet::compact] new_value: T::Balance, ) -> DispatchResult { diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs index 952e0d646135b..28546018a978f 100644 --- a/frame/grandpa/src/lib.rs +++ 
b/frame/grandpa/src/lib.rs @@ -214,7 +214,7 @@ pub mod pallet { /// if the block author is defined it will be defined as the equivocation /// reporter. #[pallet::weight(T::WeightInfo::report_equivocation(key_owner_proof.validator_count()))] - pub(super) fn report_equivocation_unsigned( + pub fn report_equivocation_unsigned( origin: OriginFor, equivocation_proof: EquivocationProof, key_owner_proof: T::KeyOwnerProof, diff --git a/frame/identity/src/lib.rs b/frame/identity/src/lib.rs index 91b3f3a50fc48..b71b069ccb74f 100644 --- a/frame/identity/src/lib.rs +++ b/frame/identity/src/lib.rs @@ -549,7 +549,7 @@ pub mod pallet { /// - One event. /// # #[pallet::weight(T::WeightInfo::add_registrar(T::MaxRegistrars::get()))] - pub(super) fn add_registrar(origin: OriginFor, account: T::AccountId) -> DispatchResultWithPostInfo { + pub fn add_registrar(origin: OriginFor, account: T::AccountId) -> DispatchResultWithPostInfo { T::RegistrarOrigin::ensure_origin(origin)?; let (i, registrar_count) = >::try_mutate( @@ -590,7 +590,7 @@ pub mod pallet { T::MaxRegistrars::get().into(), // R T::MaxAdditionalFields::get().into(), // X ))] - pub(super) fn set_identity(origin: OriginFor, info: IdentityInfo) -> DispatchResultWithPostInfo { + pub fn set_identity(origin: OriginFor, info: IdentityInfo) -> DispatchResultWithPostInfo { let sender = ensure_signed(origin)?; let extra_fields = info.additional.len() as u32; ensure!(extra_fields <= T::MaxAdditionalFields::get(), Error::::TooManyFields); @@ -656,7 +656,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::set_subs_old(T::MaxSubAccounts::get()) // P: Assume max sub accounts removed. .saturating_add(T::WeightInfo::set_subs_new(subs.len() as u32)) // S: Assume all subs are new. 
)] - pub(super) fn set_subs(origin: OriginFor, subs: Vec<(T::AccountId, Data)>) -> DispatchResultWithPostInfo { + pub fn set_subs(origin: OriginFor, subs: Vec<(T::AccountId, Data)>) -> DispatchResultWithPostInfo { let sender = ensure_signed(origin)?; ensure!(>::contains_key(&sender), Error::::NotFound); ensure!(subs.len() <= T::MaxSubAccounts::get() as usize, Error::::TooManySubAccounts); @@ -719,7 +719,7 @@ pub mod pallet { T::MaxSubAccounts::get().into(), // S T::MaxAdditionalFields::get().into(), // X ))] - pub(super) fn clear_identity(origin: OriginFor) -> DispatchResultWithPostInfo { + pub fn clear_identity(origin: OriginFor) -> DispatchResultWithPostInfo { let sender = ensure_signed(origin)?; let (subs_deposit, sub_ids) = >::take(&sender); @@ -768,7 +768,7 @@ pub mod pallet { T::MaxRegistrars::get().into(), // R T::MaxAdditionalFields::get().into(), // X ))] - pub(super) fn request_judgement(origin: OriginFor, + pub fn request_judgement(origin: OriginFor, #[pallet::compact] reg_index: RegistrarIndex, #[pallet::compact] max_fee: BalanceOf, ) -> DispatchResultWithPostInfo { @@ -824,7 +824,7 @@ pub mod pallet { T::MaxRegistrars::get().into(), // R T::MaxAdditionalFields::get().into(), // X ))] - pub(super) fn cancel_request(origin: OriginFor, reg_index: RegistrarIndex) -> DispatchResultWithPostInfo { + pub fn cancel_request(origin: OriginFor, reg_index: RegistrarIndex) -> DispatchResultWithPostInfo { let sender = ensure_signed(origin)?; let mut id = >::get(&sender).ok_or(Error::::NoIdentity)?; @@ -864,7 +864,7 @@ pub mod pallet { /// - Benchmark: 7.315 + R * 0.329 µs (min squares analysis) /// # #[pallet::weight(T::WeightInfo::set_fee(T::MaxRegistrars::get()))] // R - pub(super) fn set_fee(origin: OriginFor, + pub fn set_fee(origin: OriginFor, #[pallet::compact] index: RegistrarIndex, #[pallet::compact] fee: BalanceOf, ) -> DispatchResultWithPostInfo { @@ -894,7 +894,7 @@ pub mod pallet { /// - Benchmark: 8.823 + R * 0.32 µs (min squares analysis) /// # 
#[pallet::weight(T::WeightInfo::set_account_id(T::MaxRegistrars::get()))] // R - pub(super) fn set_account_id(origin: OriginFor, + pub fn set_account_id(origin: OriginFor, #[pallet::compact] index: RegistrarIndex, new: T::AccountId, ) -> DispatchResultWithPostInfo { @@ -924,7 +924,7 @@ pub mod pallet { /// - Benchmark: 7.464 + R * 0.325 µs (min squares analysis) /// # #[pallet::weight(T::WeightInfo::set_fields(T::MaxRegistrars::get()))] // R - pub(super) fn set_fields(origin: OriginFor, + pub fn set_fields(origin: OriginFor, #[pallet::compact] index: RegistrarIndex, fields: IdentityFields, ) -> DispatchResultWithPostInfo { @@ -965,7 +965,7 @@ pub mod pallet { T::MaxRegistrars::get().into(), // R T::MaxAdditionalFields::get().into(), // X ))] - pub(super) fn provide_judgement(origin: OriginFor, + pub fn provide_judgement(origin: OriginFor, #[pallet::compact] reg_index: RegistrarIndex, target: ::Source, judgement: Judgement>, @@ -1026,7 +1026,7 @@ pub mod pallet { T::MaxSubAccounts::get().into(), // S T::MaxAdditionalFields::get().into(), // X ))] - pub(super) fn kill_identity( + pub fn kill_identity( origin: OriginFor, target: ::Source ) -> DispatchResultWithPostInfo { T::ForceOrigin::ensure_origin(origin)?; @@ -1060,7 +1060,7 @@ pub mod pallet { /// The dispatch origin for this call must be _Signed_ and the sender must have a registered /// sub identity of `sub`. #[pallet::weight(T::WeightInfo::add_sub(T::MaxSubAccounts::get()))] - pub(super) fn add_sub(origin: OriginFor, sub: ::Source, data: Data) -> DispatchResult { + pub fn add_sub(origin: OriginFor, sub: ::Source, data: Data) -> DispatchResult { let sender = ensure_signed(origin)?; let sub = T::Lookup::lookup(sub)?; ensure!(IdentityOf::::contains_key(&sender), Error::::NoIdentity); @@ -1088,7 +1088,7 @@ pub mod pallet { /// The dispatch origin for this call must be _Signed_ and the sender must have a registered /// sub identity of `sub`. 
#[pallet::weight(T::WeightInfo::rename_sub(T::MaxSubAccounts::get()))] - pub(super) fn rename_sub( + pub fn rename_sub( origin: OriginFor, sub: ::Source, data: Data ) -> DispatchResult { let sender = ensure_signed(origin)?; @@ -1107,7 +1107,7 @@ pub mod pallet { /// The dispatch origin for this call must be _Signed_ and the sender must have a registered /// sub identity of `sub`. #[pallet::weight(T::WeightInfo::remove_sub(T::MaxSubAccounts::get()))] - pub(super) fn remove_sub(origin: OriginFor, sub: ::Source) -> DispatchResult { + pub fn remove_sub(origin: OriginFor, sub: ::Source) -> DispatchResult { let sender = ensure_signed(origin)?; ensure!(IdentityOf::::contains_key(&sender), Error::::NoIdentity); let sub = T::Lookup::lookup(sub)?; @@ -1136,7 +1136,7 @@ pub mod pallet { /// NOTE: This should not normally be used, but is provided in the case that the non- /// controller of an account is maliciously registered as a sub-account. #[pallet::weight(T::WeightInfo::quit_sub(T::MaxSubAccounts::get()))] - pub(super) fn quit_sub(origin: OriginFor) -> DispatchResult { + pub fn quit_sub(origin: OriginFor) -> DispatchResult { let sender = ensure_signed(origin)?; let (sup, _) = SuperOf::::take(&sender).ok_or(Error::::NotSub)?; SubsOf::::mutate(&sup, |(ref mut subs_deposit, ref mut sub_ids)| { diff --git a/frame/indices/src/lib.rs b/frame/indices/src/lib.rs index 1470e3abe8661..778173dbc971f 100644 --- a/frame/indices/src/lib.rs +++ b/frame/indices/src/lib.rs @@ -90,7 +90,7 @@ pub mod pallet { /// - DB Weight: 1 Read/Write (Accounts) /// # #[pallet::weight(T::WeightInfo::claim())] - pub(crate) fn claim(origin: OriginFor, index: T::AccountIndex) -> DispatchResult { + pub fn claim(origin: OriginFor, index: T::AccountIndex) -> DispatchResult { let who = ensure_signed(origin)?; Accounts::::try_mutate(index, |maybe_value| { @@ -123,7 +123,7 @@ pub mod pallet { /// - Writes: Indices Accounts, System Account (recipient) /// # #[pallet::weight(T::WeightInfo::transfer())] - 
pub(crate) fn transfer( + pub fn transfer( origin: OriginFor, new: T::AccountId, index: T::AccountIndex, @@ -162,7 +162,7 @@ pub mod pallet { /// - DB Weight: 1 Read/Write (Accounts) /// # #[pallet::weight(T::WeightInfo::free())] - pub(crate) fn free(origin: OriginFor, index: T::AccountIndex) -> DispatchResult { + pub fn free(origin: OriginFor, index: T::AccountIndex) -> DispatchResult { let who = ensure_signed(origin)?; Accounts::::try_mutate(index, |maybe_value| -> DispatchResult { @@ -198,7 +198,7 @@ pub mod pallet { /// - Writes: Indices Accounts, System Account (original owner) /// # #[pallet::weight(T::WeightInfo::force_transfer())] - pub(crate) fn force_transfer( + pub fn force_transfer( origin: OriginFor, new: T::AccountId, index: T::AccountIndex, @@ -234,7 +234,7 @@ pub mod pallet { /// - DB Weight: 1 Read/Write (Accounts) /// # #[pallet::weight(T::WeightInfo::freeze())] - pub(crate) fn freeze(origin: OriginFor, index: T::AccountIndex) -> DispatchResult { + pub fn freeze(origin: OriginFor, index: T::AccountIndex) -> DispatchResult { let who = ensure_signed(origin)?; Accounts::::try_mutate(index, |maybe_value| -> DispatchResult { diff --git a/frame/lottery/src/lib.rs b/frame/lottery/src/lib.rs index 5d6940c93b3ea..53cadbf02b940 100644 --- a/frame/lottery/src/lib.rs +++ b/frame/lottery/src/lib.rs @@ -286,7 +286,7 @@ pub mod pallet { T::WeightInfo::buy_ticket() .saturating_add(call.get_dispatch_info().weight) )] - pub(crate) fn buy_ticket(origin: OriginFor, call: Box<::Call>) -> DispatchResult { + pub fn buy_ticket(origin: OriginFor, call: Box<::Call>) -> DispatchResult { let caller = ensure_signed(origin.clone())?; call.clone().dispatch(origin).map_err(|e| e.error)?; @@ -301,7 +301,7 @@ pub mod pallet { /// /// This extrinsic must be called by the Manager origin. 
#[pallet::weight(T::WeightInfo::set_calls(calls.len() as u32))] - pub(crate) fn set_calls(origin: OriginFor, calls: Vec<::Call>) -> DispatchResult { + pub fn set_calls(origin: OriginFor, calls: Vec<::Call>) -> DispatchResult { T::ManagerOrigin::ensure_origin(origin)?; ensure!(calls.len() <= T::MaxCalls::get() as usize, Error::::TooManyCalls); if calls.is_empty() { @@ -325,7 +325,7 @@ pub mod pallet { /// * `delay`: How long after the lottery end we should wait before picking a winner. /// * `repeat`: If the lottery should repeat when completed. #[pallet::weight(T::WeightInfo::start_lottery())] - pub(crate) fn start_lottery( + pub fn start_lottery( origin: OriginFor, price: BalanceOf, length: T::BlockNumber, @@ -363,7 +363,7 @@ pub mod pallet { /// /// This extrinsic must be called by the `ManagerOrigin`. #[pallet::weight(T::WeightInfo::stop_repeat())] - pub(crate) fn stop_repeat(origin: OriginFor) -> DispatchResult { + pub fn stop_repeat(origin: OriginFor) -> DispatchResult { T::ManagerOrigin::ensure_origin(origin)?; Lottery::::mutate(|mut lottery| { if let Some(config) = &mut lottery { diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs index bbb41e7a9287a..bc7ce7029a95b 100644 --- a/frame/multisig/src/lib.rs +++ b/frame/multisig/src/lib.rs @@ -245,7 +245,7 @@ pub mod pallet{ dispatch_info.class, ) })] - pub(super) fn as_multi_threshold_1( + pub fn as_multi_threshold_1( origin: OriginFor, other_signatories: Vec, call: Box<::Call>, @@ -335,7 +335,7 @@ pub mod pallet{ .max(T::WeightInfo::as_multi_complete(s, z)) .saturating_add(*max_weight) })] - pub(super) fn as_multi( + pub fn as_multi( origin: OriginFor, threshold: u16, other_signatories: Vec, @@ -392,7 +392,7 @@ pub mod pallet{ .max(T::WeightInfo::approve_as_multi_complete(s)) .saturating_add(*max_weight) })] - pub(super) fn approve_as_multi( + pub fn approve_as_multi( origin: OriginFor, threshold: u16, other_signatories: Vec, @@ -431,7 +431,7 @@ pub mod pallet{ /// - Write: Multisig Storage, 
[Caller Account], Refund Account, Calls /// # #[pallet::weight(T::WeightInfo::cancel_as_multi(other_signatories.len() as u32))] - pub(super) fn cancel_as_multi( + pub fn cancel_as_multi( origin: OriginFor, threshold: u16, other_signatories: Vec, diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs index a76d4506f93bc..1e0ef90e0a3ac 100644 --- a/frame/nicks/src/lib.rs +++ b/frame/nicks/src/lib.rs @@ -138,7 +138,7 @@ pub mod pallet { /// - One event. /// # #[pallet::weight(50_000_000)] - pub(super) fn set_name(origin: OriginFor, name: Vec) -> DispatchResult { + pub fn set_name(origin: OriginFor, name: Vec) -> DispatchResult { let sender = ensure_signed(origin)?; ensure!(name.len() >= T::MinLength::get() as usize, Error::::TooShort); @@ -169,7 +169,7 @@ pub mod pallet { /// - One event. /// # #[pallet::weight(70_000_000)] - pub(super) fn clear_name(origin: OriginFor) -> DispatchResult { + pub fn clear_name(origin: OriginFor) -> DispatchResult { let sender = ensure_signed(origin)?; let deposit = >::take(&sender).ok_or(Error::::Unnamed)?.1; @@ -195,7 +195,7 @@ pub mod pallet { /// - One event. /// # #[pallet::weight(70_000_000)] - pub(super) fn kill_name( + pub fn kill_name( origin: OriginFor, target: ::Source ) -> DispatchResult { @@ -225,7 +225,7 @@ pub mod pallet { /// - One event. /// # #[pallet::weight(70_000_000)] - pub(super) fn force_name( + pub fn force_name( origin: OriginFor, target: ::Source, name: Vec diff --git a/frame/proxy/src/lib.rs b/frame/proxy/src/lib.rs index f308dbd28955f..6e78df2c7326d 100644 --- a/frame/proxy/src/lib.rs +++ b/frame/proxy/src/lib.rs @@ -183,7 +183,7 @@ pub mod pallet { .saturating_add(T::DbWeight::get().reads_writes(1, 1)), di.class) })] - pub(super) fn proxy( + pub fn proxy( origin: OriginFor, real: T::AccountId, force_proxy_type: Option, @@ -212,7 +212,7 @@ pub mod pallet { /// Weight is a function of the number of proxies the user has (P). 
/// # #[pallet::weight(T::WeightInfo::add_proxy(T::MaxProxies::get().into()))] - pub(super) fn add_proxy( + pub fn add_proxy( origin: OriginFor, delegate: T::AccountId, proxy_type: T::ProxyType, @@ -234,7 +234,7 @@ pub mod pallet { /// Weight is a function of the number of proxies the user has (P). /// # #[pallet::weight(T::WeightInfo::remove_proxy(T::MaxProxies::get().into()))] - pub(super) fn remove_proxy( + pub fn remove_proxy( origin: OriginFor, delegate: T::AccountId, proxy_type: T::ProxyType, @@ -255,7 +255,7 @@ pub mod pallet { /// Weight is a function of the number of proxies the user has (P). /// # #[pallet::weight(T::WeightInfo::remove_proxies(T::MaxProxies::get().into()))] - pub(super) fn remove_proxies(origin: OriginFor) -> DispatchResultWithPostInfo { + pub fn remove_proxies(origin: OriginFor) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; let (_, old_deposit) = Proxies::::take(&who); T::Currency::unreserve(&who, old_deposit); @@ -287,7 +287,7 @@ pub mod pallet { /// # /// TODO: Might be over counting 1 read #[pallet::weight(T::WeightInfo::anonymous(T::MaxProxies::get().into()))] - pub(super) fn anonymous( + pub fn anonymous( origin: OriginFor, proxy_type: T::ProxyType, delay: T::BlockNumber, @@ -337,7 +337,7 @@ pub mod pallet { /// Weight is a function of the number of proxies the user has (P). /// # #[pallet::weight(T::WeightInfo::kill_anonymous(T::MaxProxies::get().into()))] - pub(super) fn kill_anonymous( + pub fn kill_anonymous( origin: OriginFor, spawner: T::AccountId, proxy_type: T::ProxyType, @@ -379,7 +379,7 @@ pub mod pallet { /// - P: the number of proxies the user has. 
/// # #[pallet::weight(T::WeightInfo::announce(T::MaxPending::get(), T::MaxProxies::get().into()))] - pub(super) fn announce( + pub fn announce( origin: OriginFor, real: T::AccountId, call_hash: CallHashOf @@ -430,7 +430,7 @@ pub mod pallet { #[pallet::weight( T::WeightInfo::remove_announcement(T::MaxPending::get(), T::MaxProxies::get().into()) )] - pub(super) fn remove_announcement( + pub fn remove_announcement( origin: OriginFor, real: T::AccountId, call_hash: CallHashOf @@ -460,7 +460,7 @@ pub mod pallet { #[pallet::weight( T::WeightInfo::reject_announcement(T::MaxPending::get(), T::MaxProxies::get().into()) )] - pub(super) fn reject_announcement( + pub fn reject_announcement( origin: OriginFor, delegate: T::AccountId, call_hash: CallHashOf @@ -496,7 +496,7 @@ pub mod pallet { .saturating_add(T::DbWeight::get().reads_writes(1, 1)), di.class) })] - pub(super) fn proxy_announced( + pub fn proxy_announced( origin: OriginFor, delegate: T::AccountId, real: T::AccountId, diff --git a/frame/recovery/src/lib.rs b/frame/recovery/src/lib.rs index 7802f26d1d1fb..6f5c7ebcb6e4a 100644 --- a/frame/recovery/src/lib.rs +++ b/frame/recovery/src/lib.rs @@ -362,7 +362,7 @@ pub mod pallet { dispatch_info.class, ) })] - pub(crate) fn as_recovered( + pub fn as_recovered( origin: OriginFor, account: T::AccountId, call: Box<::Call> @@ -389,7 +389,7 @@ pub mod pallet { /// - One event /// # #[pallet::weight(30_000_000)] - pub(crate) fn set_recovered( + pub fn set_recovered( origin: OriginFor, lost: T::AccountId, rescuer: T::AccountId, @@ -429,7 +429,7 @@ pub mod pallet { /// Total Complexity: O(F + X) /// # #[pallet::weight(100_000_000)] - pub(crate) fn create_recovery( + pub fn create_recovery( origin: OriginFor, friends: Vec, threshold: u16, @@ -491,7 +491,7 @@ pub mod pallet { /// Total Complexity: O(F + X) /// # #[pallet::weight(100_000_000)] - pub(crate) fn initiate_recovery(origin: OriginFor, account: T::AccountId) -> DispatchResult { + pub fn initiate_recovery(origin: OriginFor, 
account: T::AccountId) -> DispatchResult { let who = ensure_signed(origin)?; // Check that the account is recoverable ensure!(>::contains_key(&account), Error::::NotRecoverable); @@ -538,7 +538,7 @@ pub mod pallet { /// Total Complexity: O(F + logF + V + logV) /// # #[pallet::weight(100_000_000)] - pub(crate) fn vouch_recovery( + pub fn vouch_recovery( origin: OriginFor, lost: T::AccountId, rescuer: T::AccountId @@ -582,7 +582,7 @@ pub mod pallet { /// Total Complexity: O(F + V) /// # #[pallet::weight(100_000_000)] - pub(crate) fn claim_recovery(origin: OriginFor, account: T::AccountId) -> DispatchResult { + pub fn claim_recovery(origin: OriginFor, account: T::AccountId) -> DispatchResult { let who = ensure_signed(origin)?; // Get the recovery configuration for the lost account let recovery_config = Self::recovery_config(&account).ok_or(Error::::NotRecoverable)?; @@ -628,7 +628,7 @@ pub mod pallet { /// Total Complexity: O(V + X) /// # #[pallet::weight(30_000_000)] - pub(crate) fn close_recovery(origin: OriginFor, rescuer: T::AccountId) -> DispatchResult { + pub fn close_recovery(origin: OriginFor, rescuer: T::AccountId) -> DispatchResult { let who = ensure_signed(origin)?; // Take the active recovery process started by the rescuer for this account. let active_recovery = >::take(&who, &rescuer).ok_or(Error::::NotStarted)?; @@ -662,7 +662,7 @@ pub mod pallet { /// Total Complexity: O(F + X) /// # #[pallet::weight(30_000_000)] - pub(crate) fn remove_recovery(origin: OriginFor) -> DispatchResult { + pub fn remove_recovery(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; // Check there are no active recoveries let mut active_recoveries = >::iter_prefix_values(&who); @@ -688,7 +688,7 @@ pub mod pallet { /// - One storage mutation to check account is recovered by `who`. 
O(1) /// # #[pallet::weight(30_000_000)] - pub(crate) fn cancel_recovered(origin: OriginFor, account: T::AccountId) -> DispatchResult { + pub fn cancel_recovered(origin: OriginFor, account: T::AccountId) -> DispatchResult { let who = ensure_signed(origin)?; // Check `who` is allowed to make a call on behalf of `account` ensure!(Self::proxy(&who) == Some(account), Error::::NotAllowed); diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index 006ab5a0f2d75..950bbde8bc499 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -346,7 +346,7 @@ pub mod pallet { /// - Will use base weight of 25 which should be good for up to 30 scheduled calls /// # #[pallet::weight(::WeightInfo::schedule(T::MaxScheduledPerBlock::get()))] - pub(crate) fn schedule( + pub fn schedule( origin: OriginFor, when: T::BlockNumber, maybe_periodic: Option>, @@ -376,7 +376,7 @@ pub mod pallet { /// - Will use base weight of 100 which should be good for up to 30 scheduled calls /// # #[pallet::weight(::WeightInfo::cancel(T::MaxScheduledPerBlock::get()))] - pub(crate) fn cancel(origin: OriginFor, when: T::BlockNumber, index: u32) -> DispatchResult { + pub fn cancel(origin: OriginFor, when: T::BlockNumber, index: u32) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; let origin = ::Origin::from(origin); Self::do_cancel(Some(origin.caller().clone()), (when, index))?; @@ -394,7 +394,7 @@ pub mod pallet { /// - Will use base weight of 35 which should be good for more than 30 scheduled calls /// # #[pallet::weight(::WeightInfo::schedule_named(T::MaxScheduledPerBlock::get()))] - pub(crate) fn schedule_named( + pub fn schedule_named( origin: OriginFor, id: Vec, when: T::BlockNumber, @@ -426,7 +426,7 @@ pub mod pallet { /// - Will use base weight of 100 which should be good for up to 30 scheduled calls /// # #[pallet::weight(::WeightInfo::cancel_named(T::MaxScheduledPerBlock::get()))] - pub(crate) fn cancel_named(origin: OriginFor, id: Vec) -> 
DispatchResult { + pub fn cancel_named(origin: OriginFor, id: Vec) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; let origin = ::Origin::from(origin); Self::do_cancel_named(Some(origin.caller().clone()), id)?; @@ -439,7 +439,7 @@ pub mod pallet { /// Same as [`schedule`]. /// # #[pallet::weight(::WeightInfo::schedule(T::MaxScheduledPerBlock::get()))] - pub(crate) fn schedule_after( + pub fn schedule_after( origin: OriginFor, after: T::BlockNumber, maybe_periodic: Option>, @@ -464,7 +464,7 @@ pub mod pallet { /// Same as [`schedule_named`]. /// # #[pallet::weight(::WeightInfo::schedule_named(T::MaxScheduledPerBlock::get()))] - pub(crate) fn schedule_named_after( + pub fn schedule_named_after( origin: OriginFor, id: Vec, after: T::BlockNumber, diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs index 51cc1df050709..6f70ddda99f64 100644 --- a/frame/sudo/src/lib.rs +++ b/frame/sudo/src/lib.rs @@ -144,7 +144,7 @@ pub mod pallet { let dispatch_info = call.get_dispatch_info(); (dispatch_info.weight.saturating_add(10_000), dispatch_info.class) })] - pub(crate) fn sudo( + pub fn sudo( origin: OriginFor, call: Box<::Call>, ) -> DispatchResultWithPostInfo { @@ -169,7 +169,7 @@ pub mod pallet { /// - The weight of this call is defined by the caller. /// # #[pallet::weight((*_weight, call.get_dispatch_info().class))] - pub(crate) fn sudo_unchecked_weight( + pub fn sudo_unchecked_weight( origin: OriginFor, call: Box<::Call>, _weight: Weight, @@ -194,7 +194,7 @@ pub mod pallet { /// - One DB change. 
/// # #[pallet::weight(0)] - pub(crate) fn set_key( + pub fn set_key( origin: OriginFor, new: ::Source, ) -> DispatchResultWithPostInfo { @@ -230,7 +230,7 @@ pub mod pallet { dispatch_info.class, ) })] - pub(crate) fn sudo_as( + pub fn sudo_as( origin: OriginFor, who: ::Source, call: Box<::Call> diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 4e830c26691e8..1d4d7e461834c 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -2071,7 +2071,7 @@ pub mod pallet_prelude { /// impl, I: 'static> Pallet { /// /// Doc comment put in metadata /// #[pallet::weight(0)] -/// fn toto(origin: OriginFor, #[pallet::compact] _foo: u32) -> DispatchResultWithPostInfo { +/// pub fn toto(origin: OriginFor, #[pallet::compact] _foo: u32) -> DispatchResultWithPostInfo { /// let _ = origin; /// unimplemented!(); /// } diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index f0597ea2fe0f3..e3a110f2e7e2c 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -301,7 +301,7 @@ pub mod pallet { // TODO: This should only be available for testing, rather than in general usage, but // that's not possible at present (since it's within the pallet macro). 
#[pallet::weight(*_ratio * T::BlockWeights::get().max_block)] - pub(crate) fn fill_block(origin: OriginFor, _ratio: Perbill) -> DispatchResultWithPostInfo { + pub fn fill_block(origin: OriginFor, _ratio: Perbill) -> DispatchResultWithPostInfo { ensure_root(origin)?; Ok(().into()) } @@ -312,7 +312,7 @@ pub mod pallet { /// - `O(1)` /// # #[pallet::weight(T::SystemWeightInfo::remark(_remark.len() as u32))] - pub(crate) fn remark(origin: OriginFor, _remark: Vec) -> DispatchResultWithPostInfo { + pub fn remark(origin: OriginFor, _remark: Vec) -> DispatchResultWithPostInfo { ensure_signed(origin)?; Ok(().into()) } @@ -326,7 +326,7 @@ pub mod pallet { /// - 1 write to HEAP_PAGES /// # #[pallet::weight((T::SystemWeightInfo::set_heap_pages(), DispatchClass::Operational))] - pub(crate) fn set_heap_pages(origin: OriginFor, pages: u64) -> DispatchResultWithPostInfo { + pub fn set_heap_pages(origin: OriginFor, pages: u64) -> DispatchResultWithPostInfo { ensure_root(origin)?; storage::unhashed::put_raw(well_known_keys::HEAP_PAGES, &pages.encode()); Ok(().into()) @@ -414,7 +414,7 @@ pub mod pallet { T::SystemWeightInfo::set_storage(items.len() as u32), DispatchClass::Operational, ))] - pub(crate) fn set_storage(origin: OriginFor, items: Vec) -> DispatchResultWithPostInfo { + pub fn set_storage(origin: OriginFor, items: Vec) -> DispatchResultWithPostInfo { ensure_root(origin)?; for i in &items { storage::unhashed::put_raw(&i.0, &i.1); @@ -434,7 +434,7 @@ pub mod pallet { T::SystemWeightInfo::kill_storage(keys.len() as u32), DispatchClass::Operational, ))] - pub(crate) fn kill_storage(origin: OriginFor, keys: Vec) -> DispatchResultWithPostInfo { + pub fn kill_storage(origin: OriginFor, keys: Vec) -> DispatchResultWithPostInfo { ensure_root(origin)?; for key in &keys { storage::unhashed::kill(&key); @@ -457,7 +457,7 @@ pub mod pallet { T::SystemWeightInfo::kill_prefix(_subkeys.saturating_add(1)), DispatchClass::Operational, ))] - pub(crate) fn kill_prefix( + pub fn kill_prefix( 
origin: OriginFor, prefix: Key, _subkeys: u32, @@ -474,7 +474,7 @@ pub mod pallet { /// - 1 event. /// # #[pallet::weight(T::SystemWeightInfo::remark_with_event(remark.len() as u32))] - pub(crate) fn remark_with_event(origin: OriginFor, remark: Vec) -> DispatchResultWithPostInfo { + pub fn remark_with_event(origin: OriginFor, remark: Vec) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; let hash = T::Hashing::hash(&remark[..]); Self::deposit_event(Event::Remarked(who, hash)); diff --git a/frame/timestamp/src/lib.rs b/frame/timestamp/src/lib.rs index 3315fadb1c1cc..f7dd7378d8ab5 100644 --- a/frame/timestamp/src/lib.rs +++ b/frame/timestamp/src/lib.rs @@ -184,7 +184,7 @@ pub mod pallet { T::WeightInfo::set(), DispatchClass::Mandatory ))] - pub(super) fn set(origin: OriginFor, #[pallet::compact] now: T::Moment) -> DispatchResult { + pub fn set(origin: OriginFor, #[pallet::compact] now: T::Moment) -> DispatchResult { ensure_none(origin)?; assert!(!DidUpdate::::exists(), "Timestamp must be updated only once in the block"); let prev = Self::now(); diff --git a/frame/transaction-storage/src/lib.rs b/frame/transaction-storage/src/lib.rs index ef824a8399f57..97dfd76fe6773 100644 --- a/frame/transaction-storage/src/lib.rs +++ b/frame/transaction-storage/src/lib.rs @@ -174,7 +174,7 @@ pub mod pallet { /// Additionally contains a DB write. /// # #[pallet::weight(T::WeightInfo::store(data.len() as u32))] - pub(super) fn store( + pub fn store( origin: OriginFor, data: Vec, ) -> DispatchResult { @@ -220,7 +220,7 @@ pub mod pallet { /// - Constant. /// # #[pallet::weight(T::WeightInfo::renew())] - pub(super) fn renew( + pub fn renew( origin: OriginFor, block: T::BlockNumber, index: u32, @@ -261,7 +261,7 @@ pub mod pallet { /// Here we assume a maximum of 100 probed transactions. 
/// # #[pallet::weight((T::WeightInfo::check_proof_max(), DispatchClass::Mandatory))] - pub(super) fn check_proof( + pub fn check_proof( origin: OriginFor, proof: TransactionStorageProof, ) -> DispatchResultWithPostInfo { diff --git a/frame/uniques/src/lib.rs b/frame/uniques/src/lib.rs index f4a0228de4a89..28518843c96fc 100644 --- a/frame/uniques/src/lib.rs +++ b/frame/uniques/src/lib.rs @@ -297,7 +297,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::create())] - pub(super) fn create( + pub fn create( origin: OriginFor, #[pallet::compact] class: T::ClassId, admin: ::Source, @@ -346,7 +346,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::force_create())] - pub(super) fn force_create( + pub fn force_create( origin: OriginFor, #[pallet::compact] class: T::ClassId, owner: ::Source, @@ -396,7 +396,7 @@ pub mod pallet { witness.instance_metadatas, witness.attributes, ))] - pub(super) fn destroy( + pub fn destroy( origin: OriginFor, #[pallet::compact] class: T::ClassId, witness: DestroyWitness, @@ -441,7 +441,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::mint())] - pub(super) fn mint( + pub fn mint( origin: OriginFor, #[pallet::compact] class: T::ClassId, #[pallet::compact] instance: T::InstanceId, @@ -470,7 +470,7 @@ pub mod pallet { /// Weight: `O(1)` /// Modes: `check_owner.is_some()`. 
#[pallet::weight(T::WeightInfo::burn())] - pub(super) fn burn( + pub fn burn( origin: OriginFor, #[pallet::compact] class: T::ClassId, #[pallet::compact] instance: T::InstanceId, @@ -503,7 +503,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::transfer())] - pub(super) fn transfer( + pub fn transfer( origin: OriginFor, #[pallet::compact] class: T::ClassId, #[pallet::compact] instance: T::InstanceId, @@ -539,7 +539,7 @@ pub mod pallet { /// /// Weight: `O(instances.len())` #[pallet::weight(T::WeightInfo::redeposit(instances.len() as u32))] - pub(super) fn redeposit( + pub fn redeposit( origin: OriginFor, #[pallet::compact] class: T::ClassId, instances: Vec, @@ -595,7 +595,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::freeze())] - pub(super) fn freeze( + pub fn freeze( origin: OriginFor, #[pallet::compact] class: T::ClassId, #[pallet::compact] instance: T::InstanceId, @@ -625,7 +625,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::thaw())] - pub(super) fn thaw( + pub fn thaw( origin: OriginFor, #[pallet::compact] class: T::ClassId, #[pallet::compact] instance: T::InstanceId, @@ -654,7 +654,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::freeze_class())] - pub(super) fn freeze_class( + pub fn freeze_class( origin: OriginFor, #[pallet::compact] class: T::ClassId ) -> DispatchResult { @@ -681,7 +681,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::thaw_class())] - pub(super) fn thaw_class( + pub fn thaw_class( origin: OriginFor, #[pallet::compact] class: T::ClassId ) -> DispatchResult { @@ -709,7 +709,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::transfer_ownership())] - pub(super) fn transfer_ownership( + pub fn transfer_ownership( origin: OriginFor, #[pallet::compact] class: T::ClassId, owner: ::Source, @@ -751,7 +751,7 @@ pub mod pallet { /// /// Weight: `O(1)` 
#[pallet::weight(T::WeightInfo::set_team())] - pub(super) fn set_team( + pub fn set_team( origin: OriginFor, #[pallet::compact] class: T::ClassId, issuer: ::Source, @@ -788,7 +788,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::approve_transfer())] - pub(super) fn approve_transfer( + pub fn approve_transfer( origin: OriginFor, #[pallet::compact] class: T::ClassId, #[pallet::compact] instance: T::InstanceId, @@ -835,7 +835,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::cancel_approval())] - pub(super) fn cancel_approval( + pub fn cancel_approval( origin: OriginFor, #[pallet::compact] class: T::ClassId, #[pallet::compact] instance: T::InstanceId, @@ -882,7 +882,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::force_asset_status())] - pub(super) fn force_asset_status( + pub fn force_asset_status( origin: OriginFor, #[pallet::compact] class: T::ClassId, owner: ::Source, @@ -927,7 +927,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::set_attribute())] - pub(super) fn set_attribute( + pub fn set_attribute( origin: OriginFor, #[pallet::compact] class: T::ClassId, maybe_instance: Option, @@ -992,7 +992,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::clear_attribute())] - pub(super) fn clear_attribute( + pub fn clear_attribute( origin: OriginFor, #[pallet::compact] class: T::ClassId, maybe_instance: Option, @@ -1041,7 +1041,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::set_metadata())] - pub(super) fn set_metadata( + pub fn set_metadata( origin: OriginFor, #[pallet::compact] class: T::ClassId, #[pallet::compact] instance: T::InstanceId, @@ -1107,7 +1107,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::clear_metadata())] - pub(super) fn clear_metadata( + pub fn clear_metadata( origin: OriginFor, #[pallet::compact] class: T::ClassId, #[pallet::compact] instance: T::InstanceId, 
@@ -1156,7 +1156,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::set_class_metadata())] - pub(super) fn set_class_metadata( + pub fn set_class_metadata( origin: OriginFor, #[pallet::compact] class: T::ClassId, data: BoundedVec, @@ -1216,7 +1216,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::clear_class_metadata())] - pub(super) fn clear_class_metadata( + pub fn clear_class_metadata( origin: OriginFor, #[pallet::compact] class: T::ClassId, ) -> DispatchResult { From 350ba1293319713f0587d7260863108c7e6e200e Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Sat, 12 Jun 2021 15:59:56 +0100 Subject: [PATCH 36/61] Fixes in Assets Pallet (#9059) * upper bound witness with refund * simple test * track approvals * dont allow approvals when asset is frozen * destroy returns approval deposit * update `NonTransfer` proxies * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_assets --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/assets/src/weights.rs --template=./.maintain/frame-weight-template.hbs Co-authored-by: Parity Bot --- bin/node/runtime/src/lib.rs | 2 + frame/assets/src/lib.rs | 51 ++++++++++--- frame/assets/src/tests.rs | 76 +++++++++++++++++- frame/assets/src/weights.rs | 148 ++++++++++++++++++------------------ 4 files changed, 194 insertions(+), 83 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 14bf16d19778e..13189b1ff898a 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -268,6 +268,8 @@ impl InstanceFilter for ProxyType { ProxyType::NonTransfer => !matches!( c, Call::Balances(..) | + Call::Assets(..) | + Call::Uniques(..) 
| Call::Vesting(pallet_vesting::Call::vested_transfer(..)) | Call::Indices(pallet_indices::Call::transfer(..)) ), diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index afcdb5b054d04..44ecbe98a017f 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -468,6 +468,10 @@ pub mod pallet { /// /// Emits `Destroyed` event when successful. /// + /// NOTE: It can be helpful to first freeze an asset before destroying it so that you + /// can provide accurate witness information and prevent users from manipulating state + /// in a way that can make it harder to destroy. + /// /// Weight: `O(c + p + a)` where: /// - `c = (witness.accounts - witness.sufficients)` /// - `s = witness.sufficients` @@ -481,7 +485,7 @@ pub mod pallet { origin: OriginFor, #[pallet::compact] id: T::AssetId, witness: DestroyWitness, - ) -> DispatchResult { + ) -> DispatchResultWithPostInfo { let maybe_check_owner = match T::ForceOrigin::try_origin(origin) { Ok(_) => None, Err(origin) => Some(ensure_signed(origin)?), @@ -491,9 +495,9 @@ pub mod pallet { if let Some(check_owner) = maybe_check_owner { ensure!(details.owner == check_owner, Error::::NoPermission); } - ensure!(details.accounts == witness.accounts, Error::::BadWitness); - ensure!(details.sufficients == witness.sufficients, Error::::BadWitness); - ensure!(details.approvals == witness.approvals, Error::::BadWitness); + ensure!(details.accounts <= witness.accounts, Error::::BadWitness); + ensure!(details.sufficients <= witness.sufficients, Error::::BadWitness); + ensure!(details.approvals <= witness.approvals, Error::::BadWitness); for (who, v) in Account::::drain_prefix(id) { Self::dead_account(id, &who, &mut details, v.sufficient); @@ -507,11 +511,18 @@ pub mod pallet { details.deposit.saturating_add(metadata.deposit), ); - Approvals::::remove_prefix((&id,)); + for ((owner, _), approval) in Approvals::::drain_prefix((&id,)) { + T::Currency::unreserve(&owner, approval.deposit); + } 
Self::deposit_event(Event::Destroyed(id)); - // NOTE: could use postinfo to reflect the actual number of accounts/sufficient/approvals - Ok(()) + Ok( + Some(T::WeightInfo::destroy( + details.accounts.saturating_sub(details.sufficients), + details.sufficients, + details.approvals, + )).into() + ) }) } @@ -1134,8 +1145,18 @@ pub mod pallet { let owner = ensure_signed(origin)?; let delegate = T::Lookup::lookup(delegate)?; + let mut d = Asset::::get(id).ok_or(Error::::Unknown)?; + ensure!(!d.is_frozen, Error::::Frozen); Approvals::::try_mutate((id, &owner, &delegate), |maybe_approved| -> DispatchResult { - let mut approved = maybe_approved.take().unwrap_or_default(); + let mut approved = match maybe_approved.take() { + // an approval already exists and is being updated + Some(a) => a, + // a new approval is created + None => { + d.approvals.saturating_inc(); + Default::default() + } + }; let deposit_required = T::ApprovalDeposit::get(); if approved.deposit < deposit_required { T::Currency::reserve(&owner, deposit_required - approved.deposit)?; @@ -1145,6 +1166,7 @@ pub mod pallet { *maybe_approved = Some(approved); Ok(()) })?; + Asset::::insert(id, d); Self::deposit_event(Event::ApprovedTransfer(id, owner, delegate, amount)); Ok(()) @@ -1171,9 +1193,13 @@ pub mod pallet { ) -> DispatchResult { let owner = ensure_signed(origin)?; let delegate = T::Lookup::lookup(delegate)?; + let mut d = Asset::::get(id).ok_or(Error::::Unknown)?; let approval = Approvals::::take((id, &owner, &delegate)).ok_or(Error::::Unknown)?; T::Currency::unreserve(&owner, approval.deposit); + d.approvals.saturating_dec(); + Asset::::insert(id, d); + Self::deposit_event(Event::ApprovalCancelled(id, owner, delegate)); Ok(()) } @@ -1198,11 +1224,11 @@ pub mod pallet { owner: ::Source, delegate: ::Source, ) -> DispatchResult { + let mut d = Asset::::get(id).ok_or(Error::::Unknown)?; T::ForceOrigin::try_origin(origin) .map(|_| ()) .or_else(|origin| -> DispatchResult { let origin = ensure_signed(origin)?; 
- let d = Asset::::get(id).ok_or(Error::::Unknown)?; ensure!(&origin == &d.admin, Error::::NoPermission); Ok(()) })?; @@ -1212,6 +1238,8 @@ pub mod pallet { let approval = Approvals::::take((id, &owner, &delegate)).ok_or(Error::::Unknown)?; T::Currency::unreserve(&owner, approval.deposit); + d.approvals.saturating_dec(); + Asset::::insert(id, d); Self::deposit_event(Event::ApprovalCancelled(id, owner, delegate)); Ok(()) @@ -1263,6 +1291,11 @@ pub mod pallet { if remaining.is_zero() { T::Currency::unreserve(&owner, approved.deposit); + Asset::::mutate(id, |maybe_details| { + if let Some(details) = maybe_details { + details.approvals.saturating_dec(); + } + }); } else { approved.amount = remaining; *maybe_approved = Some(approved); diff --git a/frame/assets/src/tests.rs b/frame/assets/src/tests.rs index 6bef5b962de74..b561864c8e481 100644 --- a/frame/assets/src/tests.rs +++ b/frame/assets/src/tests.rs @@ -37,19 +37,47 @@ fn basic_minting_should_work() { #[test] fn approval_lifecycle_works() { new_test_ext().execute_with(|| { + // can't approve non-existent token + assert_noop!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50), Error::::Unknown); + // so we create it :) assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); Balances::make_free_balance_be(&1, 1); assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); + assert_eq!(Asset::::get(0).unwrap().approvals, 1); assert_eq!(Balances::reserved_balance(&1), 1); assert_ok!(Assets::transfer_approved(Origin::signed(2), 0, 1, 3, 40)); + assert_eq!(Asset::::get(0).unwrap().approvals, 1); assert_ok!(Assets::cancel_approval(Origin::signed(1), 0, 2)); + assert_eq!(Asset::::get(0).unwrap().approvals, 0); assert_eq!(Assets::balance(0, 1), 60); assert_eq!(Assets::balance(0, 3), 40); assert_eq!(Balances::reserved_balance(&1), 0); }); } +#[test] +fn transfer_approved_all_funds() { + new_test_ext().execute_with(|| { + // can't approve non-existent 
token + assert_noop!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50), Error::::Unknown); + // so we create it :) + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + Balances::make_free_balance_be(&1, 1); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); + assert_eq!(Asset::::get(0).unwrap().approvals, 1); + assert_eq!(Balances::reserved_balance(&1), 1); + + // transfer the full amount, which should trigger auto-cleanup + assert_ok!(Assets::transfer_approved(Origin::signed(2), 0, 1, 3, 50)); + assert_eq!(Asset::::get(0).unwrap().approvals, 0); + assert_eq!(Assets::balance(0, 1), 50); + assert_eq!(Assets::balance(0, 3), 50); + assert_eq!(Balances::reserved_balance(&1), 0); + }); +} + #[test] fn approval_deposits_work() { new_test_ext().execute_with(|| { @@ -102,10 +130,13 @@ fn cancel_approval_works() { assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); Balances::make_free_balance_be(&1, 1); assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); + assert_eq!(Asset::::get(0).unwrap().approvals, 1); assert_noop!(Assets::cancel_approval(Origin::signed(1), 1, 2), Error::::Unknown); assert_noop!(Assets::cancel_approval(Origin::signed(2), 0, 2), Error::::Unknown); assert_noop!(Assets::cancel_approval(Origin::signed(1), 0, 3), Error::::Unknown); + assert_eq!(Asset::::get(0).unwrap().approvals, 1); assert_ok!(Assets::cancel_approval(Origin::signed(1), 0, 2)); + assert_eq!(Asset::::get(0).unwrap().approvals, 0); assert_noop!(Assets::cancel_approval(Origin::signed(1), 0, 2), Error::::Unknown); }); } @@ -117,12 +148,15 @@ fn force_cancel_approval_works() { assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); Balances::make_free_balance_be(&1, 1); assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); + assert_eq!(Asset::::get(0).unwrap().approvals, 1); let e = Error::::NoPermission; assert_noop!(Assets::force_cancel_approval(Origin::signed(2), 0, 
1, 2), e); assert_noop!(Assets::force_cancel_approval(Origin::signed(1), 1, 1, 2), Error::::Unknown); assert_noop!(Assets::force_cancel_approval(Origin::signed(1), 0, 2, 2), Error::::Unknown); assert_noop!(Assets::force_cancel_approval(Origin::signed(1), 0, 1, 3), Error::::Unknown); + assert_eq!(Asset::::get(0).unwrap().approvals, 1); assert_ok!(Assets::force_cancel_approval(Origin::signed(1), 0, 1, 2)); + assert_eq!(Asset::::get(0).unwrap().approvals, 0); assert_noop!(Assets::force_cancel_approval(Origin::signed(1), 0, 1, 2), Error::::Unknown); }); } @@ -180,9 +214,35 @@ fn destroy_with_bad_witness_should_not_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); - let w = Asset::::get(0).unwrap().destroy_witness(); + let mut w = Asset::::get(0).unwrap().destroy_witness(); assert_ok!(Assets::mint(Origin::signed(1), 0, 10, 100)); + // witness too low assert_noop!(Assets::destroy(Origin::signed(1), 0, w), Error::::BadWitness); + // witness too high is okay though + w.accounts += 2; + w.sufficients += 2; + assert_ok!(Assets::destroy(Origin::signed(1), 0, w)); + + }); +} + +#[test] +fn destroy_should_refund_approvals() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 10, 100)); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 3, 50)); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 4, 50)); + assert_eq!(Balances::reserved_balance(&1), 3); + + let w = Asset::::get(0).unwrap().destroy_witness(); + assert_ok!(Assets::destroy(Origin::signed(1), 0, w)); + assert_eq!(Balances::reserved_balance(&1), 0); + + // all approvals are removed + assert!(Approvals::::iter().count().is_zero()) }); } @@ -306,6 +366,20 @@ fn 
transferring_frozen_asset_should_not_work() { }); } +#[test] +fn approve_transfer_frozen_asset_should_not_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_eq!(Assets::balance(0, 1), 100); + assert_ok!(Assets::freeze_asset(Origin::signed(1), 0)); + assert_noop!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50), Error::::Frozen); + assert_ok!(Assets::thaw_asset(Origin::signed(1), 0)); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); + }); +} + #[test] fn origin_guards_should_work() { new_test_ext().execute_with(|| { diff --git a/frame/assets/src/weights.rs b/frame/assets/src/weights.rs index c3c804a392dbe..77db7fa4f05ba 100644 --- a/frame/assets/src/weights.rs +++ b/frame/assets/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_assets //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-03-08, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-06-10, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -73,267 +73,269 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn create() -> Weight { - (48_305_000 as Weight) + (52_735_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_create() -> Weight { - (23_827_000 as Weight) + (26_570_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn destroy(c: u32, s: u32, a: u32, ) -> Weight { (0 as Weight) - // Standard Error: 38_000 - .saturating_add((24_232_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 38_000 - .saturating_add((30_467_000 as Weight).saturating_mul(s as Weight)) - // Standard Error: 383_000 - .saturating_add((2_343_000 as Weight).saturating_mul(a as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) + // Standard Error: 93_000 + .saturating_add((31_110_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 93_000 + .saturating_add((38_908_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 935_000 + .saturating_add((42_765_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(c as Weight))) .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(s as Weight))) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(a as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((2 as Weight).saturating_mul(c as Weight))) .saturating_add(T::DbWeight::get().writes((2 as Weight).saturating_mul(s as Weight))) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(a as Weight))) } fn mint() -> Weight { - (46_433_000 as Weight) + 
(58_399_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn burn() -> Weight { - (46_000_000 as Weight) + (65_917_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn transfer() -> Weight { - (70_793_000 as Weight) + (100_407_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn transfer_keep_alive() -> Weight { - (57_453_000 as Weight) + (84_243_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn force_transfer() -> Weight { - (70_968_000 as Weight) + (100_407_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn freeze() -> Weight { - (34_290_000 as Weight) + (37_831_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn thaw() -> Weight { - (34_419_000 as Weight) + (37_660_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn freeze_asset() -> Weight { - (24_373_000 as Weight) + (27_175_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn thaw_asset() -> Weight { - (24_096_000 as Weight) + (26_884_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn transfer_ownership() -> Weight { - (28_566_000 as Weight) + (31_877_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_team() -> Weight { - (25_297_000 as Weight) + (27_947_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) 
.saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_metadata(_n: u32, s: u32, ) -> Weight { - (53_367_000 as Weight) + (57_993_000 as Weight) // Standard Error: 0 - .saturating_add((8_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((12_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn clear_metadata() -> Weight { - (51_721_000 as Weight) + (57_820_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_set_metadata(_n: u32, s: u32, ) -> Weight { - (27_117_000 as Weight) + (30_830_000 as Weight) // Standard Error: 0 - .saturating_add((5_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((7_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_clear_metadata() -> Weight { - (51_598_000 as Weight) + (57_292_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_asset_status() -> Weight { - (23_366_000 as Weight) + (26_750_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn approve_transfer() -> Weight { - (47_906_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + (65_598_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn transfer_approved() -> Weight { - (90_338_000 as Weight) + (131_312_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(5 as Weight)) } fn cancel_approval() -> Weight { - (48_591_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 
as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + (66_904_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn force_cancel_approval() -> Weight { - (54_879_000 as Weight) + (67_525_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } } // For backwards compatibility and tests impl WeightInfo for () { fn create() -> Weight { - (48_305_000 as Weight) + (52_735_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_create() -> Weight { - (23_827_000 as Weight) + (26_570_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn destroy(c: u32, s: u32, a: u32, ) -> Weight { (0 as Weight) - // Standard Error: 38_000 - .saturating_add((24_232_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 38_000 - .saturating_add((30_467_000 as Weight).saturating_mul(s as Weight)) - // Standard Error: 383_000 - .saturating_add((2_343_000 as Weight).saturating_mul(a as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + // Standard Error: 93_000 + .saturating_add((31_110_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 93_000 + .saturating_add((38_908_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 935_000 + .saturating_add((42_765_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(c as Weight))) .saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(s as Weight))) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(a as Weight))) 
.saturating_add(RocksDbWeight::get().writes(2 as Weight)) .saturating_add(RocksDbWeight::get().writes((2 as Weight).saturating_mul(c as Weight))) .saturating_add(RocksDbWeight::get().writes((2 as Weight).saturating_mul(s as Weight))) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(a as Weight))) } fn mint() -> Weight { - (46_433_000 as Weight) + (58_399_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn burn() -> Weight { - (46_000_000 as Weight) + (65_917_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn transfer() -> Weight { - (70_793_000 as Weight) + (100_407_000 as Weight) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn transfer_keep_alive() -> Weight { - (57_453_000 as Weight) + (84_243_000 as Weight) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn force_transfer() -> Weight { - (70_968_000 as Weight) + (100_407_000 as Weight) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn freeze() -> Weight { - (34_290_000 as Weight) + (37_831_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn thaw() -> Weight { - (34_419_000 as Weight) + (37_660_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn freeze_asset() -> Weight { - (24_373_000 as Weight) + (27_175_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn thaw_asset() -> Weight { - (24_096_000 as Weight) + (26_884_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 
as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn transfer_ownership() -> Weight { - (28_566_000 as Weight) + (31_877_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn set_team() -> Weight { - (25_297_000 as Weight) + (27_947_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn set_metadata(_n: u32, s: u32, ) -> Weight { - (53_367_000 as Weight) + (57_993_000 as Weight) // Standard Error: 0 - .saturating_add((8_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((12_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn clear_metadata() -> Weight { - (51_721_000 as Weight) + (57_820_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_set_metadata(_n: u32, s: u32, ) -> Weight { - (27_117_000 as Weight) + (30_830_000 as Weight) // Standard Error: 0 - .saturating_add((5_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((7_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_clear_metadata() -> Weight { - (51_598_000 as Weight) + (57_292_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_asset_status() -> Weight { - (23_366_000 as Weight) + (26_750_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn approve_transfer() -> Weight { - (47_906_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + 
(65_598_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn transfer_approved() -> Weight { - (90_338_000 as Weight) + (131_312_000 as Weight) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } fn cancel_approval() -> Weight { - (48_591_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + (66_904_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn force_cancel_approval() -> Weight { - (54_879_000 as Weight) + (67_525_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } } From 1f16a6a41b973bbdd800ce07ac68c6055400a321 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Sat, 12 Jun 2021 16:58:36 +0100 Subject: [PATCH 37/61] im-online: send heartbeats at a random period (#8819) * im-online: send heartbeats at a random period * support: use permill to represent session progress * im-online: increase probability of heartbeating with session progress * babe, session: fix tests * babe: fix test --- frame/babe/src/lib.rs | 6 +- frame/babe/src/tests.rs | 6 +- frame/im-online/src/lib.rs | 44 +++++++++---- frame/im-online/src/mock.rs | 6 +- frame/im-online/src/tests.rs | 86 +++++++++++++++++++++++++- frame/session/src/lib.rs | 8 +-- frame/session/src/tests.rs | 14 ++--- frame/support/src/traits/validation.rs | 6 +- 8 files changed, 140 insertions(+), 36 deletions(-) diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index a0a9e01eaa26c..6ec199925be17 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -31,7 +31,7 @@ use 
sp_application_crypto::Public; use sp_runtime::{ generic::DigestItem, traits::{IsMember, One, SaturatedConversion, Saturating, Zero}, - ConsensusEngineId, KeyTypeId, Percent, + ConsensusEngineId, KeyTypeId, Permill, }; use sp_session::{GetSessionNumber, GetValidatorCount}; use sp_std::prelude::*; @@ -848,11 +848,11 @@ impl frame_support::traits::EstimateNextSessionRotation (Option, Weight) { + fn estimate_current_session_progress(_now: T::BlockNumber) -> (Option, Weight) { let elapsed = CurrentSlot::::get().saturating_sub(Self::current_epoch_start()) + 1; ( - Some(Percent::from_rational( + Some(Permill::from_rational( *elapsed, T::EpochDuration::get(), )), diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index 6aa80e9697339..dfb398a4f4775 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -236,10 +236,10 @@ fn can_estimate_current_epoch_progress() { if Babe::estimate_next_session_rotation(i).0.unwrap() - 1 == i { assert_eq!( Babe::estimate_current_session_progress(i).0.unwrap(), - Percent::from_percent(100) + Permill::from_percent(100) ); } else { - assert!(Babe::estimate_current_session_progress(i).0.unwrap() < Percent::from_percent(100)); + assert!(Babe::estimate_current_session_progress(i).0.unwrap() < Permill::from_percent(100)); } } @@ -247,7 +247,7 @@ fn can_estimate_current_epoch_progress() { progress_to_block(4); assert_eq!( Babe::estimate_current_session_progress(4).0.unwrap(), - Percent::from_percent(33), + Permill::from_float(1.0 / 3.0), ); }) } diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index bddb286fad739..e132f7f929a06 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -81,8 +81,8 @@ use sp_std::prelude::*; use sp_std::convert::TryInto; use sp_runtime::{ offchain::storage::StorageValueRef, - traits::{AtLeast32BitUnsigned, Convert, Saturating}, - Perbill, Percent, RuntimeDebug, + traits::{AtLeast32BitUnsigned, Convert, Saturating, TrailingZeroInput}, + Perbill, Permill, 
PerThing, RuntimeDebug, SaturatedConversion, }; use sp_staking::{ SessionIndex, @@ -571,23 +571,46 @@ impl Pallet { pub(crate) fn send_heartbeats( block_number: T::BlockNumber, ) -> OffchainResult>> { - const HALF_SESSION: Percent = Percent::from_percent(50); + const START_HEARTBEAT_RANDOM_PERIOD: Permill = Permill::from_percent(10); + const START_HEARTBEAT_FINAL_PERIOD: Permill = Permill::from_percent(80); + + // this should give us a residual probability of 1/SESSION_LENGTH of sending an heartbeat, + // i.e. all heartbeats spread uniformly, over most of the session. as the session progresses + // the probability of sending an heartbeat starts to increase exponentially. + let random_choice = |progress: Permill| { + // given session progress `p` and session length `l` + // the threshold formula is: p^6 + 1/l + let session_length = T::NextSessionRotation::average_session_length(); + let residual = Permill::from_rational(1u32, session_length.saturated_into()); + let threshold: Permill = progress.saturating_pow(6).saturating_add(residual); + + let seed = sp_io::offchain::random_seed(); + let random = ::decode(&mut TrailingZeroInput::new(seed.as_ref())) + .expect("input is padded with zeroes; qed"); + let random = Permill::from_parts(random % Permill::ACCURACY); + + random <= threshold + }; - let too_early = if let (Some(progress), _) = + let should_heartbeat = if let (Some(progress), _) = T::NextSessionRotation::estimate_current_session_progress(block_number) { - // we try to get an estimate of the current session progress first since it - // should provide more accurate results and send the heartbeat if we're halfway - // through the session. - progress < HALF_SESSION + // we try to get an estimate of the current session progress first since it should + // provide more accurate results. we will start an early heartbeat period where we'll + // randomly pick whether to heartbeat. 
after 80% of the session has elapsed, if we + // haven't sent an heartbeat yet we'll send one unconditionally. the idea is to prevent + // all nodes from sending the heartbeats at the same block and causing a temporary (but + // deterministic) spike in transactions. + progress >= START_HEARTBEAT_FINAL_PERIOD + || progress >= START_HEARTBEAT_RANDOM_PERIOD && random_choice(progress) } else { // otherwise we fallback to using the block number calculated at the beginning // of the session that should roughly correspond to the middle of the session let heartbeat_after = >::get(); - block_number < heartbeat_after + block_number >= heartbeat_after }; - if too_early { + if !should_heartbeat { return Err(OffchainErr::TooEarly); } @@ -607,7 +630,6 @@ impl Pallet { ) } - fn send_single_heartbeat( authority_index: u32, key: T::AuthorityId, diff --git a/frame/im-online/src/mock.rs b/frame/im-online/src/mock.rs index 4f21012abc510..4bc976476a676 100644 --- a/frame/im-online/src/mock.rs +++ b/frame/im-online/src/mock.rs @@ -26,7 +26,7 @@ use pallet_session::historical as pallet_session_historical; use sp_core::H256; use sp_runtime::testing::{Header, TestXt, UintAuthorityId}; use sp_runtime::traits::{BlakeTwo256, ConvertInto, IdentityLookup}; -use sp_runtime::{Perbill, Percent}; +use sp_runtime::{Perbill, Permill}; use sp_staking::{ offence::{OffenceError, ReportOffence}, SessionIndex, @@ -182,7 +182,7 @@ impl pallet_authorship::Config for Runtime { } thread_local! { - pub static MOCK_CURRENT_SESSION_PROGRESS: RefCell>> = RefCell::new(None); + pub static MOCK_CURRENT_SESSION_PROGRESS: RefCell>> = RefCell::new(None); } thread_local! 
{ @@ -199,7 +199,7 @@ impl frame_support::traits::EstimateNextSessionRotation for TestNextSession mock.unwrap_or(pallet_session::PeriodicSessions::::average_session_length()) } - fn estimate_current_session_progress(now: u64) -> (Option, Weight) { + fn estimate_current_session_progress(now: u64) -> (Option, Weight) { let (estimate, weight) = pallet_session::PeriodicSessions::::estimate_current_session_progress( now, diff --git a/frame/im-online/src/tests.rs b/frame/im-online/src/tests.rs index f100bd71c34f6..5fb8fd3a791e9 100644 --- a/frame/im-online/src/tests.rs +++ b/frame/im-online/src/tests.rs @@ -433,10 +433,92 @@ fn should_handle_non_linear_session_progress() { assert!(ImOnline::send_heartbeats(5).ok().is_some()); // if we have a valid current session progress then we'll heartbeat as soon - // as we're past 50% of the session regardless of the block number + // as we're past 80% of the session regardless of the block number MOCK_CURRENT_SESSION_PROGRESS - .with(|p| *p.borrow_mut() = Some(Some(Percent::from_percent(51)))); + .with(|p| *p.borrow_mut() = Some(Some(Permill::from_percent(81)))); assert!(ImOnline::send_heartbeats(2).ok().is_some()); }); } + +#[test] +fn test_does_not_heartbeat_early_in_the_session() { + let mut ext = new_test_ext(); + let (offchain, _state) = TestOffchainExt::new(); + let (pool, _) = TestTransactionPoolExt::new(); + ext.register_extension(OffchainDbExt::new(offchain.clone())); + ext.register_extension(OffchainWorkerExt::new(offchain)); + ext.register_extension(TransactionPoolExt::new(pool)); + + ext.execute_with(|| { + // mock current session progress as being 5%. we only randomly start + // heartbeating after 10% of the session has elapsed. 
+ MOCK_CURRENT_SESSION_PROGRESS.with(|p| *p.borrow_mut() = Some(Some(Permill::from_float(0.05)))); + assert_eq!( + ImOnline::send_heartbeats(2).err(), + Some(OffchainErr::TooEarly), + ); + }); +} + +#[test] +fn test_probability_of_heartbeating_increases_with_session_progress() { + let mut ext = new_test_ext(); + let (offchain, state) = TestOffchainExt::new(); + let (pool, _) = TestTransactionPoolExt::new(); + ext.register_extension(OffchainDbExt::new(offchain.clone())); + ext.register_extension(OffchainWorkerExt::new(offchain)); + ext.register_extension(TransactionPoolExt::new(pool)); + + ext.execute_with(|| { + let set_test = |progress, random: f64| { + // the average session length is 100 blocks, therefore the residual + // probability of sending a heartbeat is 1% + MOCK_AVERAGE_SESSION_LENGTH.with(|p| *p.borrow_mut() = Some(100)); + MOCK_CURRENT_SESSION_PROGRESS.with(|p| *p.borrow_mut() = + Some(Some(Permill::from_float(progress)))); + + let mut seed = [0u8; 32]; + let encoded = ((random * Permill::ACCURACY as f64) as u32).encode(); + seed[0..4].copy_from_slice(&encoded); + state.write().seed = seed; + }; + + let assert_too_early = |progress, random| { + set_test(progress, random); + assert_eq!( + ImOnline::send_heartbeats(2).err(), + Some(OffchainErr::TooEarly), + ); + }; + + let assert_heartbeat_ok = |progress, random| { + set_test(progress, random); + assert!(ImOnline::send_heartbeats(2).ok().is_some()); + }; + + assert_too_early(0.05, 1.0); + + assert_too_early(0.1, 0.1); + assert_too_early(0.1, 0.011); + assert_heartbeat_ok(0.1, 0.010); + + assert_too_early(0.4, 0.015); + assert_heartbeat_ok(0.4, 0.014); + + assert_too_early(0.5, 0.026); + assert_heartbeat_ok(0.5, 0.025); + + assert_too_early(0.6, 0.057); + assert_heartbeat_ok(0.6, 0.056); + + assert_too_early(0.65, 0.086); + assert_heartbeat_ok(0.65, 0.085); + + assert_too_early(0.7, 0.13); + assert_heartbeat_ok(0.7, 0.12); + + assert_too_early(0.75, 0.19); + assert_heartbeat_ok(0.75, 0.18); + }); +} diff 
--git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index 8574979ef2fea..547d29715d9c1 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -118,7 +118,7 @@ use sp_std::{prelude::*, marker::PhantomData, ops::{Sub, Rem}}; use codec::Decode; use sp_runtime::{ traits::{AtLeast32BitUnsigned, Convert, Member, One, OpaqueKeys, Zero}, - KeyTypeId, Perbill, Percent, RuntimeAppPublic, + KeyTypeId, Perbill, Permill, RuntimeAppPublic, }; use sp_staking::SessionIndex; use frame_support::{ @@ -168,7 +168,7 @@ impl< Period::get() } - fn estimate_current_session_progress(now: BlockNumber) -> (Option, Weight) { + fn estimate_current_session_progress(now: BlockNumber) -> (Option, Weight) { let offset = Offset::get(); let period = Period::get(); @@ -177,12 +177,12 @@ impl< // (0% is never returned). let progress = if now >= offset { let current = (now - offset) % period.clone() + One::one(); - Some(Percent::from_rational( + Some(Permill::from_rational( current.clone(), period.clone(), )) } else { - Some(Percent::from_rational( + Some(Permill::from_rational( now + One::one(), offset, )) diff --git a/frame/session/src/tests.rs b/frame/session/src/tests.rs index f48388b5a002c..a551e1a4a2612 100644 --- a/frame/session/src/tests.rs +++ b/frame/session/src/tests.rs @@ -274,11 +274,11 @@ fn periodic_session_works() { if P::estimate_next_session_rotation(i).0.unwrap() - 1 == i { assert_eq!( P::estimate_current_session_progress(i).0.unwrap(), - Percent::from_percent(100) + Permill::from_percent(100) ); } else { assert!( - P::estimate_current_session_progress(i).0.unwrap() < Percent::from_percent(100) + P::estimate_current_session_progress(i).0.unwrap() < Permill::from_percent(100) ); } } @@ -290,7 +290,7 @@ fn periodic_session_works() { assert_eq!(P::estimate_next_session_rotation(3u64).0.unwrap(), 3); assert_eq!( P::estimate_current_session_progress(3u64).0.unwrap(), - Percent::from_percent(10), + Permill::from_percent(10), ); for i in (1u64..10).map(|i| 3 + i) { 
@@ -302,11 +302,11 @@ fn periodic_session_works() { if P::estimate_next_session_rotation(i).0.unwrap() - 1 == i { assert_eq!( P::estimate_current_session_progress(i).0.unwrap(), - Percent::from_percent(100) + Permill::from_percent(100) ); } else { assert!( - P::estimate_current_session_progress(i).0.unwrap() < Percent::from_percent(100) + P::estimate_current_session_progress(i).0.unwrap() < Permill::from_percent(100) ); } } @@ -316,14 +316,14 @@ fn periodic_session_works() { assert_eq!(P::estimate_next_session_rotation(13u64).0.unwrap(), 23); assert_eq!( P::estimate_current_session_progress(13u64).0.unwrap(), - Percent::from_percent(10) + Permill::from_percent(10) ); assert!(!P::should_end_session(14u64)); assert_eq!(P::estimate_next_session_rotation(14u64).0.unwrap(), 23); assert_eq!( P::estimate_current_session_progress(14u64).0.unwrap(), - Percent::from_percent(20) + Permill::from_percent(20) ); } diff --git a/frame/support/src/traits/validation.rs b/frame/support/src/traits/validation.rs index 900be7bb8e7e2..d0583d6991fe6 100644 --- a/frame/support/src/traits/validation.rs +++ b/frame/support/src/traits/validation.rs @@ -20,7 +20,7 @@ use sp_std::prelude::*; use codec::{Codec, Decode}; use sp_runtime::traits::{Convert, Zero}; -use sp_runtime::{BoundToRuntimeAppPublic, ConsensusEngineId, Percent, RuntimeAppPublic}; +use sp_runtime::{BoundToRuntimeAppPublic, ConsensusEngineId, Permill, RuntimeAppPublic}; use sp_staking::SessionIndex; use crate::dispatch::Parameter; use crate::weights::Weight; @@ -126,7 +126,7 @@ pub trait EstimateNextSessionRotation { /// Return an estimate of the current session progress. /// /// None should be returned if the estimation fails to come to an answer. - fn estimate_current_session_progress(now: BlockNumber) -> (Option, Weight); + fn estimate_current_session_progress(now: BlockNumber) -> (Option, Weight); /// Return the block number at which the next session rotation is estimated to happen. 
/// @@ -139,7 +139,7 @@ impl EstimateNextSessionRotation for () { Zero::zero() } - fn estimate_current_session_progress(_: BlockNumber) -> (Option, Weight) { + fn estimate_current_session_progress(_: BlockNumber) -> (Option, Weight) { (None, Zero::zero()) } From 9e42949aeb8779a36a4c1f8cff037570815f9aff Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Sat, 12 Jun 2021 18:15:21 +0200 Subject: [PATCH 38/61] Enforce pub calls in pallets (#9085) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * make all extrinsics public so they are available from outside * Impl * fix * more fix * more pub * few more * merge fix * fix ui test * fix ui test Co-authored-by: Alexander Popiak Co-authored-by: Shawn Tabrizi Co-authored-by: Bastian Köcher --- frame/authorship/src/lib.rs | 2 +- .../election-provider-multi-phase/src/lib.rs | 2 +- frame/grandpa/src/lib.rs | 4 +-- frame/scheduler/src/lib.rs | 4 +-- frame/staking/src/lib.rs | 2 +- frame/sudo/src/mock.rs | 4 +-- .../procedural/src/pallet/parse/call.rs | 12 ++++++++ frame/support/src/lib.rs | 4 +-- frame/support/test/tests/pallet.rs | 8 ++--- .../test/tests/pallet_compatibility.rs | 2 +- .../tests/pallet_compatibility_instance.rs | 2 +- frame/support/test/tests/pallet_instance.rs | 4 +-- .../pallet_ui/call_argument_invalid_bound.rs | 2 +- .../call_argument_invalid_bound.stderr | 18 +++++------ .../call_argument_invalid_bound_2.rs | 2 +- .../call_argument_invalid_bound_2.stderr | 30 +++++++++---------- .../call_argument_invalid_bound_3.rs | 2 +- .../call_argument_invalid_bound_3.stderr | 18 +++++------ .../pallet_ui/call_invalid_origin_type.rs | 2 +- .../pallet_ui/call_invalid_origin_type.stderr | 12 ++++---- .../tests/pallet_ui/call_invalid_return.rs | 2 +- .../pallet_ui/call_invalid_return.stderr | 6 ++-- .../test/tests/pallet_ui/call_invalid_vis.rs | 27 +++++++++++++++++ .../tests/pallet_ui/call_invalid_vis.stderr | 5 ++++ .../tests/pallet_ui/call_invalid_vis_2.rs | 27 
+++++++++++++++++ .../tests/pallet_ui/call_invalid_vis_2.stderr | 5 ++++ .../tests/pallet_ui/call_missing_weight.rs | 2 +- .../pallet_ui/call_missing_weight.stderr | 6 ++-- .../test/tests/pallet_ui/call_no_origin.rs | 2 +- .../tests/pallet_ui/call_no_origin.stderr | 6 ++-- .../test/tests/pallet_ui/call_no_return.rs | 2 +- .../tests/pallet_ui/call_no_return.stderr | 6 ++-- 32 files changed, 154 insertions(+), 78 deletions(-) create mode 100644 frame/support/test/tests/pallet_ui/call_invalid_vis.rs create mode 100644 frame/support/test/tests/pallet_ui/call_invalid_vis.stderr create mode 100644 frame/support/test/tests/pallet_ui/call_invalid_vis_2.rs create mode 100644 frame/support/test/tests/pallet_ui/call_invalid_vis_2.stderr diff --git a/frame/authorship/src/lib.rs b/frame/authorship/src/lib.rs index b00f412808a1a..9b46a3fe11990 100644 --- a/frame/authorship/src/lib.rs +++ b/frame/authorship/src/lib.rs @@ -226,7 +226,7 @@ pub mod pallet { impl Pallet { /// Provide a set of uncles. #[pallet::weight((0, DispatchClass::Mandatory))] - fn set_uncles(origin: OriginFor, new_uncles: Vec) -> DispatchResult { + pub fn set_uncles(origin: OriginFor, new_uncles: Vec) -> DispatchResult { ensure_none(origin)?; ensure!(new_uncles.len() <= MAX_UNCLES, Error::::TooManyUncles); diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index a4ca89a417e0f..0254525ce819d 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -786,7 +786,7 @@ pub mod pallet { /// /// This check can be turned off by setting the value to `None`. 
#[pallet::weight(T::DbWeight::get().writes(1))] - fn set_minimum_untrusted_score( + pub fn set_minimum_untrusted_score( origin: OriginFor, maybe_next_score: Option, ) -> DispatchResult { diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs index 28546018a978f..2d10e3c96b14d 100644 --- a/frame/grandpa/src/lib.rs +++ b/frame/grandpa/src/lib.rs @@ -190,7 +190,7 @@ pub mod pallet { /// against the extracted offender. If both are valid, the offence /// will be reported. #[pallet::weight(T::WeightInfo::report_equivocation(key_owner_proof.validator_count()))] - fn report_equivocation( + pub fn report_equivocation( origin: OriginFor, equivocation_proof: EquivocationProof, key_owner_proof: T::KeyOwnerProof, @@ -236,7 +236,7 @@ pub mod pallet { /// will start the new authority set using the given finalized block as base. /// Only callable by root. #[pallet::weight(T::WeightInfo::note_stalled())] - fn note_stalled( + pub fn note_stalled( origin: OriginFor, delay: T::BlockNumber, best_finalized_block_number: T::BlockNumber, diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index 950bbde8bc499..a3520f3b21f7e 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -867,7 +867,7 @@ mod tests { #[pallet::call] impl Pallet where ::Origin: OriginTrait { #[pallet::weight(*weight)] - fn log(origin: OriginFor, i: u32, weight: Weight) -> DispatchResult { + pub fn log(origin: OriginFor, i: u32, weight: Weight) -> DispatchResult { Self::deposit_event(Event::Logged(i, weight)); LOG.with(|log| { log.borrow_mut().push((origin.caller().clone(), i)); @@ -876,7 +876,7 @@ mod tests { } #[pallet::weight(*weight)] - fn log_without_filter(origin: OriginFor, i: u32, weight: Weight) -> DispatchResult { + pub fn log_without_filter(origin: OriginFor, i: u32, weight: Weight) -> DispatchResult { Self::deposit_event(Event::Logged(i, weight)); LOG.with(|log| { log.borrow_mut().push((origin.caller().clone(), i)); diff --git a/frame/staking/src/lib.rs 
b/frame/staking/src/lib.rs index 49660350ba916..0a22f31e6c3f5 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -1959,7 +1959,7 @@ pub mod pallet { /// Paying even a dead controller is cheaper weight-wise. We don't do any refunds here. /// # #[pallet::weight(T::WeightInfo::payout_stakers_alive_staked(T::MaxNominatorRewardedPerValidator::get()))] - pub(super) fn payout_stakers( + pub fn payout_stakers( origin: OriginFor, validator_stash: T::AccountId, era: EraIndex, diff --git a/frame/sudo/src/mock.rs b/frame/sudo/src/mock.rs index 6b296c62fe6c7..92683f98fb64f 100644 --- a/frame/sudo/src/mock.rs +++ b/frame/sudo/src/mock.rs @@ -45,7 +45,7 @@ pub mod logger { #[pallet::call] impl Pallet { #[pallet::weight(*weight)] - pub(crate) fn privileged_i32_log( + pub fn privileged_i32_log( origin: OriginFor, i: i32, weight: Weight @@ -58,7 +58,7 @@ pub mod logger { } #[pallet::weight(*weight)] - pub(crate) fn non_privileged_log( + pub fn non_privileged_log( origin: OriginFor, i: i32, weight: Weight diff --git a/frame/support/procedural/src/pallet/parse/call.rs b/frame/support/procedural/src/pallet/parse/call.rs index c2e6dce22539f..299b86cf6f84e 100644 --- a/frame/support/procedural/src/pallet/parse/call.rs +++ b/frame/support/procedural/src/pallet/parse/call.rs @@ -149,6 +149,18 @@ impl CallDef { let mut methods = vec![]; for impl_item in &mut item.items { if let syn::ImplItem::Method(method) = impl_item { + if !matches!(method.vis, syn::Visibility::Public(_)) { + let msg = "Invalid pallet::call, dispatchable function must be public: \ + `pub fn`"; + + let span = match method.vis { + syn::Visibility::Inherited => method.sig.span(), + _ => method.vis.span(), + }; + + return Err(syn::Error::new(span, msg)); + } + match method.sig.inputs.first() { None => { let msg = "Invalid pallet::call, must have at least origin arg"; diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 1d4d7e461834c..43891c158200e 100644 --- a/frame/support/src/lib.rs 
+++ b/frame/support/src/lib.rs @@ -1438,7 +1438,7 @@ pub mod pallet_prelude { /// impl Pallet { /// /// $some_doc /// #[pallet::weight($ExpressionResultingInWeight)] -/// $vis fn $fn_name( +/// pub fn $fn_name( /// origin: OriginFor, /// $some_arg: $some_type, /// // or with compact attribute: #[pallet::compact] $some_arg: $some_type, @@ -1897,7 +1897,7 @@ pub mod pallet_prelude { /// impl Pallet { /// /// Doc comment put in metadata /// #[pallet::weight(0)] // Defines weight for call (function parameters are in scope) -/// fn toto( +/// pub fn toto( /// origin: OriginFor, /// #[pallet::compact] _foo: u32, /// ) -> DispatchResultWithPostInfo { diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index f7e04e9226874..a79c25ae8f3e3 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -133,11 +133,11 @@ pub mod pallet { #[pallet::call] impl Pallet - where T::AccountId: From + From + SomeAssociation1 + where T::AccountId: From + From + SomeAssociation1 { /// Doc comment put in metadata #[pallet::weight(Weight::from(*_foo))] - fn foo( + pub fn foo( origin: OriginFor, #[pallet::compact] _foo: u32, _bar: u32, @@ -152,7 +152,7 @@ pub mod pallet { /// Doc comment put in metadata #[pallet::weight(1)] #[frame_support::transactional] - fn foo_transactional( + pub fn foo_transactional( _origin: OriginFor, #[pallet::compact] foo: u32, ) -> DispatchResultWithPostInfo { @@ -166,7 +166,7 @@ pub mod pallet { // Test for DispatchResult return type #[pallet::weight(1)] - fn foo_no_post_info( + pub fn foo_no_post_info( _origin: OriginFor, ) -> DispatchResult { Ok(()) diff --git a/frame/support/test/tests/pallet_compatibility.rs b/frame/support/test/tests/pallet_compatibility.rs index 130014f1e9eb1..db01d15e5daa9 100644 --- a/frame/support/test/tests/pallet_compatibility.rs +++ b/frame/support/test/tests/pallet_compatibility.rs @@ -123,7 +123,7 @@ pub mod pallet { #[pallet::call] impl Pallet { 
#[pallet::weight(>::into(new_value.clone()))] - fn set_dummy( + pub fn set_dummy( origin: OriginFor, #[pallet::compact] new_value: T::Balance ) -> DispatchResultWithPostInfo { diff --git a/frame/support/test/tests/pallet_compatibility_instance.rs b/frame/support/test/tests/pallet_compatibility_instance.rs index d80d9ba3dff7d..63e71c8bf255c 100644 --- a/frame/support/test/tests/pallet_compatibility_instance.rs +++ b/frame/support/test/tests/pallet_compatibility_instance.rs @@ -113,7 +113,7 @@ pub mod pallet { #[pallet::call] impl, I: 'static> Pallet { #[pallet::weight(>::into(new_value.clone()))] - fn set_dummy( + pub fn set_dummy( origin: OriginFor, #[pallet::compact] new_value: T::Balance ) -> DispatchResultWithPostInfo { diff --git a/frame/support/test/tests/pallet_instance.rs b/frame/support/test/tests/pallet_instance.rs index 48ff166c5b226..f0b72da2c7fbf 100644 --- a/frame/support/test/tests/pallet_instance.rs +++ b/frame/support/test/tests/pallet_instance.rs @@ -81,7 +81,7 @@ pub mod pallet { impl, I: 'static> Pallet { /// Doc comment put in metadata #[pallet::weight(Weight::from(*_foo))] - fn foo(origin: OriginFor, #[pallet::compact] _foo: u32) -> DispatchResultWithPostInfo { + pub fn foo(origin: OriginFor, #[pallet::compact] _foo: u32) -> DispatchResultWithPostInfo { let _ = origin; Self::deposit_event(Event::Something(3)); Ok(().into()) @@ -90,7 +90,7 @@ pub mod pallet { /// Doc comment put in metadata #[pallet::weight(1)] #[frame_support::transactional] - fn foo_transactional( + pub fn foo_transactional( origin: OriginFor, #[pallet::compact] _foo: u32 ) -> DispatchResultWithPostInfo { diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.rs b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.rs index 69d35344d5761..0f58187f73ebe 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.rs +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.rs @@ -17,7 +17,7 @@ mod pallet { 
#[pallet::call] impl Pallet { #[pallet::weight(0)] - fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { Ok(().into()) } } diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr index 1eaf71be17104..ead05261b1938 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr @@ -1,8 +1,8 @@ error[E0369]: binary operation `==` cannot be applied to type `&::Bar` - --> $DIR/call_argument_invalid_bound.rs:20:37 + --> $DIR/call_argument_invalid_bound.rs:20:41 | -20 | fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { - | ^ +20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^ | help: consider further restricting this bound | @@ -10,18 +10,18 @@ help: consider further restricting this bound | ^^^^^^^^^^^^^^^^^^^^^ error[E0277]: the trait bound `::Bar: Clone` is not satisfied - --> $DIR/call_argument_invalid_bound.rs:20:37 + --> $DIR/call_argument_invalid_bound.rs:20:41 | -20 | fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { - | ^ the trait `Clone` is not implemented for `::Bar` +20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^ the trait `Clone` is not implemented for `::Bar` | = note: required by `clone` error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` - --> $DIR/call_argument_invalid_bound.rs:20:37 + --> $DIR/call_argument_invalid_bound.rs:20:41 | -20 | fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { - | ^ `::Bar` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` +20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^ `::Bar` cannot be formatted using `{:?}` because it doesn't 
implement `std::fmt::Debug` | = help: the trait `std::fmt::Debug` is not implemented for `::Bar` = note: required because of the requirements on the impl of `std::fmt::Debug` for `&::Bar` diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.rs b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.rs index 581c72a4240a0..da87046822eb7 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.rs +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.rs @@ -17,7 +17,7 @@ mod pallet { #[pallet::call] impl Pallet { #[pallet::weight(0)] - fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { Ok(().into()) } } diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr index 1d0e96be9edb9..2a3bbe1abf4cd 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr @@ -1,8 +1,8 @@ error[E0277]: the trait bound `::Bar: WrapperTypeDecode` is not satisfied - --> $DIR/call_argument_invalid_bound_2.rs:20:37 + --> $DIR/call_argument_invalid_bound_2.rs:20:41 | -20 | fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { - | ^ the trait `WrapperTypeDecode` is not implemented for `::Bar` +20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^ the trait `WrapperTypeDecode` is not implemented for `::Bar` | ::: /usr/local/cargo/registry/src/github.com-1ecc6299db9ec823/parity-scale-codec-2.1.1/src/codec.rs:277:18 | @@ -12,10 +12,10 @@ error[E0277]: the trait bound `::Bar: WrapperTypeDecode` is = note: required because of the requirements on the impl of `Decode` for `::Bar` error[E0277]: the trait bound `::Bar: WrapperTypeEncode` is not satisfied - --> 
$DIR/call_argument_invalid_bound_2.rs:20:37 + --> $DIR/call_argument_invalid_bound_2.rs:20:41 | -20 | fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { - | ^ the trait `WrapperTypeEncode` is not implemented for `::Bar` +20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^ the trait `WrapperTypeEncode` is not implemented for `::Bar` | ::: /usr/local/cargo/registry/src/github.com-1ecc6299db9ec823/parity-scale-codec-2.1.1/src/codec.rs:216:21 | @@ -25,10 +25,10 @@ error[E0277]: the trait bound `::Bar: WrapperTypeEncode` is = note: required because of the requirements on the impl of `pallet::_::_parity_scale_codec::Encode` for `::Bar` error[E0369]: binary operation `==` cannot be applied to type `&::Bar` - --> $DIR/call_argument_invalid_bound_2.rs:20:37 + --> $DIR/call_argument_invalid_bound_2.rs:20:41 | -20 | fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { - | ^ +20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^ | help: consider further restricting this bound | @@ -36,18 +36,18 @@ help: consider further restricting this bound | ^^^^^^^^^^^^^^^^^^^^^ error[E0277]: the trait bound `::Bar: Clone` is not satisfied - --> $DIR/call_argument_invalid_bound_2.rs:20:37 + --> $DIR/call_argument_invalid_bound_2.rs:20:41 | -20 | fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { - | ^ the trait `Clone` is not implemented for `::Bar` +20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^ the trait `Clone` is not implemented for `::Bar` | = note: required by `clone` error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` - --> $DIR/call_argument_invalid_bound_2.rs:20:37 + --> $DIR/call_argument_invalid_bound_2.rs:20:41 | -20 | fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { - | ^ `::Bar` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` +20 | pub fn foo(origin: 
OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^ `::Bar` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` | = help: the trait `std::fmt::Debug` is not implemented for `::Bar` = note: required because of the requirements on the impl of `std::fmt::Debug` for `&::Bar` diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.rs b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.rs index 97f362551037d..4a6a781ff44a7 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.rs +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.rs @@ -19,7 +19,7 @@ mod pallet { #[pallet::call] impl Pallet { #[pallet::weight(0)] - fn foo(origin: OriginFor, bar: Bar) -> DispatchResultWithPostInfo { + pub fn foo(origin: OriginFor, bar: Bar) -> DispatchResultWithPostInfo { Ok(().into()) } } diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr index 89cee573a2757..73c3069719ea2 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr @@ -1,24 +1,24 @@ error[E0369]: binary operation `==` cannot be applied to type `&Bar` - --> $DIR/call_argument_invalid_bound_3.rs:22:37 + --> $DIR/call_argument_invalid_bound_3.rs:22:41 | -22 | fn foo(origin: OriginFor, bar: Bar) -> DispatchResultWithPostInfo { - | ^^^ +22 | pub fn foo(origin: OriginFor, bar: Bar) -> DispatchResultWithPostInfo { + | ^^^ | = note: an implementation of `std::cmp::PartialEq` might be missing for `&Bar` error[E0277]: the trait bound `Bar: Clone` is not satisfied - --> $DIR/call_argument_invalid_bound_3.rs:22:37 + --> $DIR/call_argument_invalid_bound_3.rs:22:41 | -22 | fn foo(origin: OriginFor, bar: Bar) -> DispatchResultWithPostInfo { - | ^^^ the trait `Clone` is not implemented for `Bar` +22 | pub fn 
foo(origin: OriginFor, bar: Bar) -> DispatchResultWithPostInfo { + | ^^^ the trait `Clone` is not implemented for `Bar` | = note: required by `clone` error[E0277]: `Bar` doesn't implement `std::fmt::Debug` - --> $DIR/call_argument_invalid_bound_3.rs:22:37 + --> $DIR/call_argument_invalid_bound_3.rs:22:41 | -22 | fn foo(origin: OriginFor, bar: Bar) -> DispatchResultWithPostInfo { - | ^^^ `Bar` cannot be formatted using `{:?}` +22 | pub fn foo(origin: OriginFor, bar: Bar) -> DispatchResultWithPostInfo { + | ^^^ `Bar` cannot be formatted using `{:?}` | = help: the trait `std::fmt::Debug` is not implemented for `Bar` = note: add `#[derive(Debug)]` or manually implement `std::fmt::Debug` diff --git a/frame/support/test/tests/pallet_ui/call_invalid_origin_type.rs b/frame/support/test/tests/pallet_ui/call_invalid_origin_type.rs index edf953b5976c0..2502506fa6aa4 100644 --- a/frame/support/test/tests/pallet_ui/call_invalid_origin_type.rs +++ b/frame/support/test/tests/pallet_ui/call_invalid_origin_type.rs @@ -14,7 +14,7 @@ mod pallet { #[pallet::call] impl Pallet { - fn foo(origin: u8) {} + pub fn foo(origin: u8) {} } } diff --git a/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr b/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr index 855c59fd8d57d..f17cd9016a6e4 100644 --- a/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr +++ b/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr @@ -1,11 +1,11 @@ error: Invalid type: expected `OriginFor` - --> $DIR/call_invalid_origin_type.rs:17:18 + --> $DIR/call_invalid_origin_type.rs:17:22 | -17 | fn foo(origin: u8) {} - | ^^ +17 | pub fn foo(origin: u8) {} + | ^^ error: expected `OriginFor` - --> $DIR/call_invalid_origin_type.rs:17:18 + --> $DIR/call_invalid_origin_type.rs:17:22 | -17 | fn foo(origin: u8) {} - | ^^ +17 | pub fn foo(origin: u8) {} + | ^^ diff --git a/frame/support/test/tests/pallet_ui/call_invalid_return.rs 
b/frame/support/test/tests/pallet_ui/call_invalid_return.rs index 477e7f3219de3..1ccdff5d07374 100644 --- a/frame/support/test/tests/pallet_ui/call_invalid_return.rs +++ b/frame/support/test/tests/pallet_ui/call_invalid_return.rs @@ -14,7 +14,7 @@ mod pallet { #[pallet::call] impl Pallet { - fn foo(origin: OriginFor) -> ::DispatchResult { todo!() } + pub fn foo(origin: OriginFor) -> ::DispatchResult { todo!() } } } diff --git a/frame/support/test/tests/pallet_ui/call_invalid_return.stderr b/frame/support/test/tests/pallet_ui/call_invalid_return.stderr index c79da3bbf78c3..6a851ed3fc283 100644 --- a/frame/support/test/tests/pallet_ui/call_invalid_return.stderr +++ b/frame/support/test/tests/pallet_ui/call_invalid_return.stderr @@ -1,5 +1,5 @@ error: expected `DispatchResultWithPostInfo` or `DispatchResult` - --> $DIR/call_invalid_return.rs:17:35 + --> $DIR/call_invalid_return.rs:17:39 | -17 | fn foo(origin: OriginFor) -> ::DispatchResult { todo!() } - | ^^ +17 | pub fn foo(origin: OriginFor) -> ::DispatchResult { todo!() } + | ^^ diff --git a/frame/support/test/tests/pallet_ui/call_invalid_vis.rs b/frame/support/test/tests/pallet_ui/call_invalid_vis.rs new file mode 100644 index 0000000000000..fe1c5aee453d4 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_invalid_vis.rs @@ -0,0 +1,27 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, DispatchResultWithPostInfo}; + use frame_system::pallet_prelude::{BlockNumberFor, OriginFor}; + + #[pallet::config] + pub trait Config: frame_system::Config { + type Bar: codec::Codec; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { + #[pallet::weight(0)] + fn foo(origin: OriginFor) -> DispatchResultWithPostInfo { + Ok(().into()) + } + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_invalid_vis.stderr 
b/frame/support/test/tests/pallet_ui/call_invalid_vis.stderr new file mode 100644 index 0000000000000..321828a1ae28e --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_invalid_vis.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::call, dispatchable function must be public: `pub fn` + --> $DIR/call_invalid_vis.rs:20:3 + | +20 | fn foo(origin: OriginFor) -> DispatchResultWithPostInfo { + | ^^ diff --git a/frame/support/test/tests/pallet_ui/call_invalid_vis_2.rs b/frame/support/test/tests/pallet_ui/call_invalid_vis_2.rs new file mode 100644 index 0000000000000..fb25e9876dc8d --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_invalid_vis_2.rs @@ -0,0 +1,27 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, DispatchResultWithPostInfo}; + use frame_system::pallet_prelude::{BlockNumberFor, OriginFor}; + + #[pallet::config] + pub trait Config: frame_system::Config { + type Bar: codec::Codec; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { + #[pallet::weight(0)] + pub(crate) fn foo(origin: OriginFor) -> DispatchResultWithPostInfo { + Ok(().into()) + } + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_invalid_vis_2.stderr b/frame/support/test/tests/pallet_ui/call_invalid_vis_2.stderr new file mode 100644 index 0000000000000..7d3113474af73 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_invalid_vis_2.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::call, dispatchable function must be public: `pub fn` + --> $DIR/call_invalid_vis_2.rs:20:3 + | +20 | pub(crate) fn foo(origin: OriginFor) -> DispatchResultWithPostInfo { + | ^^^ diff --git a/frame/support/test/tests/pallet_ui/call_missing_weight.rs b/frame/support/test/tests/pallet_ui/call_missing_weight.rs index 2ce607c53ac3a..4cdb85502b57f 100644 --- a/frame/support/test/tests/pallet_ui/call_missing_weight.rs +++ 
b/frame/support/test/tests/pallet_ui/call_missing_weight.rs @@ -14,7 +14,7 @@ mod pallet { #[pallet::call] impl Pallet { - fn foo(origin: OriginFor) -> DispatchResultWithPostInfo {} + pub fn foo(origin: OriginFor) -> DispatchResultWithPostInfo {} } } diff --git a/frame/support/test/tests/pallet_ui/call_missing_weight.stderr b/frame/support/test/tests/pallet_ui/call_missing_weight.stderr index 37386d7771a7a..ec45d478870c1 100644 --- a/frame/support/test/tests/pallet_ui/call_missing_weight.stderr +++ b/frame/support/test/tests/pallet_ui/call_missing_weight.stderr @@ -1,5 +1,5 @@ error: Invalid pallet::call, requires weight attribute i.e. `#[pallet::weight($expr)]` - --> $DIR/call_missing_weight.rs:17:3 + --> $DIR/call_missing_weight.rs:17:7 | -17 | fn foo(origin: OriginFor) -> DispatchResultWithPostInfo {} - | ^^ +17 | pub fn foo(origin: OriginFor) -> DispatchResultWithPostInfo {} + | ^^ diff --git a/frame/support/test/tests/pallet_ui/call_no_origin.rs b/frame/support/test/tests/pallet_ui/call_no_origin.rs index 83d10b6b08b4f..231c75f43f4ad 100644 --- a/frame/support/test/tests/pallet_ui/call_no_origin.rs +++ b/frame/support/test/tests/pallet_ui/call_no_origin.rs @@ -14,7 +14,7 @@ mod pallet { #[pallet::call] impl Pallet { - fn foo() {} + pub fn foo() {} } } diff --git a/frame/support/test/tests/pallet_ui/call_no_origin.stderr b/frame/support/test/tests/pallet_ui/call_no_origin.stderr index 42afd02c42639..97574ea1b644c 100644 --- a/frame/support/test/tests/pallet_ui/call_no_origin.stderr +++ b/frame/support/test/tests/pallet_ui/call_no_origin.stderr @@ -1,5 +1,5 @@ error: Invalid pallet::call, must have at least origin arg - --> $DIR/call_no_origin.rs:17:3 + --> $DIR/call_no_origin.rs:17:7 | -17 | fn foo() {} - | ^^ +17 | pub fn foo() {} + | ^^ diff --git a/frame/support/test/tests/pallet_ui/call_no_return.rs b/frame/support/test/tests/pallet_ui/call_no_return.rs index a18c30f6d6d90..68a883c52c072 100644 --- a/frame/support/test/tests/pallet_ui/call_no_return.rs +++ 
b/frame/support/test/tests/pallet_ui/call_no_return.rs @@ -14,7 +14,7 @@ mod pallet { #[pallet::call] impl Pallet { - fn foo(origin: OriginFor) {} + pub fn foo(origin: OriginFor) {} } } diff --git a/frame/support/test/tests/pallet_ui/call_no_return.stderr b/frame/support/test/tests/pallet_ui/call_no_return.stderr index b16d401355c12..18ebbaff76d9d 100644 --- a/frame/support/test/tests/pallet_ui/call_no_return.stderr +++ b/frame/support/test/tests/pallet_ui/call_no_return.stderr @@ -1,5 +1,5 @@ error: Invalid pallet::call, require return type DispatchResultWithPostInfo - --> $DIR/call_no_return.rs:17:3 + --> $DIR/call_no_return.rs:17:7 | -17 | fn foo(origin: OriginFor) {} - | ^^ +17 | pub fn foo(origin: OriginFor) {} + | ^^ From ab84c8cfe8511ef9f8e5ffb8a497383f4b008daa Mon Sep 17 00:00:00 2001 From: Alan Sapede Date: Sat, 12 Jun 2021 20:31:53 -0400 Subject: [PATCH 39/61] Adds moonbeam, moonriver to ss58 registry (#9028) * Adds moonream, moonriver to ss58 registry * Fixes names --- primitives/core/src/crypto.rs | 4 ++++ ss58-registry.json | 18 ++++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 08e6211e32331..5be18422d0e12 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -586,6 +586,10 @@ ss58_address_format!( (78, "calamari", "Manta Canary Network, standard account (*25519).") SocialAccount => (252, "social-network", "Social Network, standard account (*25519).") + Moonbeam => + (1284, "moonbeam", "Moonbeam, session key (*25519).") + Moonriver => + (1285, "moonriver", "Moonriver, session key (*25519).") BasiliskAccount => (10041, "basilisk", "Basilisk standard account (*25519).") diff --git a/ss58-registry.json b/ss58-registry.json index 1fa01597f20fa..9fec4b7be9f5a 100644 --- a/ss58-registry.json +++ b/ss58-registry.json @@ -514,6 +514,24 @@ "standardAccount": "*25519", "website": "https://social.network" }, + { + "prefix": 1284, + "network": 
"moonbeam", + "displayName": "Moonbeam", + "symbols": ["GLMR"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://moonbeam.network" + }, + { + "prefix": 1285, + "network": "moonriver", + "displayName": "Moonriver", + "symbols": ["MOVR"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://moonbeam.network" + }, { "prefix": 10041, "network": "basilisk", From 3a41701a675b81a264cccf6b2771bfff74f6674a Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Sun, 13 Jun 2021 01:36:36 +0100 Subject: [PATCH 40/61] Allow additional trait bounds for `#[pallet::constant]` (#9050) * Allow additional trait bounds for constants * Add ui test for constants with additional trait bounds * Update trait constant ui test * Import syn::Error * Use reference instead of cloning * Add extra invalid bound ui test * Out or order valid bounds * Fix ui test * Fix ui test * Apply review suggestion about error message --- .../procedural/src/pallet/parse/config.rs | 60 ++++++++++++------- frame/support/test/tests/pallet_ui.rs | 1 + .../pass/trait_constant_valid_bounds.rs | 29 +++++++++ .../trait_constant_invalid_bound.stderr | 8 +-- .../trait_constant_invalid_bound_lifetime.rs | 23 +++++++ ...ait_constant_invalid_bound_lifetime.stderr | 5 ++ 6 files changed, 96 insertions(+), 30 deletions(-) create mode 100644 frame/support/test/tests/pallet_ui/pass/trait_constant_valid_bounds.rs create mode 100644 frame/support/test/tests/pallet_ui/trait_constant_invalid_bound_lifetime.rs create mode 100644 frame/support/test/tests/pallet_ui/trait_constant_invalid_bound_lifetime.stderr diff --git a/frame/support/procedural/src/pallet/parse/config.rs b/frame/support/procedural/src/pallet/parse/config.rs index 79d4680752b90..69dfaeb7f9e9b 100644 --- a/frame/support/procedural/src/pallet/parse/config.rs +++ b/frame/support/procedural/src/pallet/parse/config.rs @@ -16,6 +16,7 @@ // limitations under the License. 
use super::helper; +use core::convert::TryFrom; use syn::spanned::Spanned; use quote::ToTokens; @@ -25,7 +26,6 @@ mod keyword { syn::custom_keyword!(From); syn::custom_keyword!(T); syn::custom_keyword!(I); - syn::custom_keyword!(Get); syn::custom_keyword!(config); syn::custom_keyword!(IsType); syn::custom_keyword!(Event); @@ -62,19 +62,41 @@ pub struct ConstMetadataDef { pub doc: Vec, } -impl syn::parse::Parse for ConstMetadataDef { - fn parse(input: syn::parse::ParseStream) -> syn::Result { - let doc = helper::get_doc_literals(&syn::Attribute::parse_outer(input)?); - input.parse::()?; - let ident = input.parse::()?; - input.parse::()?; - input.parse::()?; - input.parse::()?; - let mut type_ = input.parse::()?; - type_ = syn::parse2::(replace_self_by_t(type_.to_token_stream())) +impl TryFrom<&syn::TraitItemType> for ConstMetadataDef { + type Error = syn::Error; + + fn try_from(trait_ty: &syn::TraitItemType) -> Result { + let err = |span, msg| + syn::Error::new(span, format!("Invalid usage of `#[pallet::constant]`: {}", msg)); + let doc = helper::get_doc_literals(&trait_ty.attrs); + let ident = trait_ty.ident.clone(); + let bound = trait_ty.bounds + .iter() + .find_map(|b| + if let syn::TypeParamBound::Trait(tb) = b { + tb.path.segments + .last() + .and_then(|s| if s.ident == "Get" { Some(s) } else { None } ) + } else { + None + } + ) + .ok_or_else(|| err(trait_ty.span(), "`Get` trait bound not found"))?; + let type_arg = if let syn::PathArguments::AngleBracketed (ref ab) = bound.arguments { + if ab.args.len() == 1 { + if let syn::GenericArgument::Type(ref ty) = ab.args[0] { + Ok(ty) + } else { + Err(err(ab.args[0].span(), "Expected a type argument")) + } + } else { + Err(err(bound.span(), "Expected a single type argument")) + } + } else { + Err(err(bound.span(), "Expected trait generic args")) + }?; + let type_ = syn::parse2::(replace_self_by_t(type_arg.to_token_stream())) .expect("Internal error: replacing `Self` by `T` should result in valid type"); - 
input.parse::]>()?; - input.parse::()?; Ok(Self { ident, type_, doc }) } @@ -322,16 +344,8 @@ impl ConfigDef { if type_attrs_const.len() == 1 { match trait_item { - syn::TraitItem::Type(type_) => { - let constant = syn::parse2::(type_.to_token_stream()) - .map_err(|e| { - let error_msg = "Invalid usage of `#[pallet::constant]`, syntax \ - must be `type $SomeIdent: Get<$SomeType>;`"; - let mut err = syn::Error::new(type_.span(), error_msg); - err.combine(e); - err - })?; - + syn::TraitItem::Type(ref type_) => { + let constant = ConstMetadataDef::try_from(type_)?; consts_metadata.push(constant); }, _ => { diff --git a/frame/support/test/tests/pallet_ui.rs b/frame/support/test/tests/pallet_ui.rs index 1836b06cabfdd..e5f4a54dfb000 100644 --- a/frame/support/test/tests/pallet_ui.rs +++ b/frame/support/test/tests/pallet_ui.rs @@ -23,4 +23,5 @@ fn pallet_ui() { let t = trybuild::TestCases::new(); t.compile_fail("tests/pallet_ui/*.rs"); + t.pass("tests/pallet_ui/pass/*.rs"); } diff --git a/frame/support/test/tests/pallet_ui/pass/trait_constant_valid_bounds.rs b/frame/support/test/tests/pallet_ui/pass/trait_constant_valid_bounds.rs new file mode 100644 index 0000000000000..71eb4f2992b39 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/pass/trait_constant_valid_bounds.rs @@ -0,0 +1,29 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config { + #[pallet::constant] + type U: Get; + + #[pallet::constant] + type V: Get + From; + + #[pallet::constant] + type W: From + Get; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound.stderr b/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound.stderr index 
16c3531140eaa..057ec6ffb2c75 100644 --- a/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound.stderr +++ b/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound.stderr @@ -1,11 +1,5 @@ -error: Invalid usage of `#[pallet::constant]`, syntax must be `type $SomeIdent: Get<$SomeType>;` +error: Invalid usage of `#[pallet::constant]`: `Get` trait bound not found --> $DIR/trait_constant_invalid_bound.rs:9:3 | 9 | type U; | ^^^^ - -error: expected `:` - --> $DIR/trait_constant_invalid_bound.rs:9:9 - | -9 | type U; - | ^ diff --git a/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound_lifetime.rs b/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound_lifetime.rs new file mode 100644 index 0000000000000..47303f2b20a02 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound_lifetime.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config { + #[pallet::constant] + type U: Get<'static>; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound_lifetime.stderr b/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound_lifetime.stderr new file mode 100644 index 0000000000000..8d830fed8f392 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound_lifetime.stderr @@ -0,0 +1,5 @@ +error: Invalid usage of `#[pallet::constant]`: Expected a type argument + --> $DIR/trait_constant_invalid_bound_lifetime.rs:9:15 + | +9 | type U: Get<'static>; + | ^^^^^^^ From c8d5796ae2b0ee5c71e2ee585fc05b3fa94ac84f Mon Sep 17 00:00:00 2001 From: Xiliang Chen Date: Sun, 13 Jun 2021 18:26:42 +1200 Subject: [PATCH 41/61] remove Default from 
AssetId trait bound (#9062) * update AssetId trait * try again --- frame/support/src/traits/tokens/misc.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frame/support/src/traits/tokens/misc.rs b/frame/support/src/traits/tokens/misc.rs index d6329e585324c..0c55ac79182cf 100644 --- a/frame/support/src/traits/tokens/misc.rs +++ b/frame/support/src/traits/tokens/misc.rs @@ -161,8 +161,8 @@ impl WithdrawReasons { } /// Simple amalgamation trait to collect together properties for an AssetId under one roof. -pub trait AssetId: FullCodec + Copy + Default + Eq + PartialEq + Debug {} -impl AssetId for T {} +pub trait AssetId: FullCodec + Copy + Eq + PartialEq + Debug {} +impl AssetId for T {} /// Simple amalgamation trait to collect together properties for a Balance under one roof. pub trait Balance: AtLeast32BitUnsigned + FullCodec + Copy + Default + Debug {} From 11d5eff647a54a70e87f805b3dabca458faca283 Mon Sep 17 00:00:00 2001 From: Lldenaurois Date: Sun, 13 Jun 2021 06:24:05 -0400 Subject: [PATCH 42/61] Add function to test whether function is exported in wasm blob (#9093) * Add function to test whether function is exported in wasm blob * Address Feedback * Update based on feedback --- client/executor/common/src/runtime_blob/runtime_blob.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/client/executor/common/src/runtime_blob/runtime_blob.rs b/client/executor/common/src/runtime_blob/runtime_blob.rs index aac023e960c79..82b9312dec501 100644 --- a/client/executor/common/src/runtime_blob/runtime_blob.rs +++ b/client/executor/common/src/runtime_blob/runtime_blob.rs @@ -81,6 +81,15 @@ impl RuntimeBlob { export_mutable_globals(&mut self.raw_module, "exported_internal_global"); } + /// Perform an instrumentation that makes sure that a specific function `entry_point` is exported + pub fn entry_point_exists(&self, entry_point: &str) -> bool { + self.raw_module.export_section().map(|e| { + e.entries() + .iter() + .any(|e| matches!(e.internal(), 
Internal::Function(_)) && e.field() == entry_point) + }).unwrap_or_default() + } + /// Returns an iterator of all globals which were exported by [`expose_mutable_globals`]. pub(super) fn exported_internal_global_names<'module>( &'module self, From 125c4b365f21e60e3d284e3c73cbf0585bfc7342 Mon Sep 17 00:00:00 2001 From: chenwei Date: Sun, 13 Jun 2021 18:27:54 +0800 Subject: [PATCH 43/61] Make find_proxy public. (#9094) export `pallet_proxy::find_prox` and `ProxyDefinition`. --- frame/proxy/src/lib.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/frame/proxy/src/lib.rs b/frame/proxy/src/lib.rs index 6e78df2c7326d..bc892b65b3774 100644 --- a/frame/proxy/src/lib.rs +++ b/frame/proxy/src/lib.rs @@ -64,12 +64,12 @@ type BalanceOf = <::Currency as Currency< { /// The account which may act on behalf of another. - delegate: AccountId, + pub delegate: AccountId, /// A value defining the subset of calls that it is allowed to make. - proxy_type: ProxyType, + pub proxy_type: ProxyType, /// The number of blocks that an announcement must be in place for before the corresponding call /// may be dispatched. If zero, then no announcement is needed. - delay: BlockNumber, + pub delay: BlockNumber, } /// Details surrounding a specific instance of an announcement to make a call. @@ -734,7 +734,7 @@ impl Pallet { }) } - fn find_proxy( + pub fn find_proxy( real: &T::AccountId, delegate: &T::AccountId, force_proxy_type: Option, From 6b3c76a23ed18c5d4f2149edebd85063a85a8218 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sun, 13 Jun 2021 12:41:13 +0100 Subject: [PATCH 44/61] pallet-authorship: Fixing some nitpicks (#9095) As reviewing the pallet yesterday, I have found some nitpicks that I fixed. 
--- frame/authorship/src/lib.rs | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/frame/authorship/src/lib.rs b/frame/authorship/src/lib.rs index 9b46a3fe11990..98d20ec621406 100644 --- a/frame/authorship/src/lib.rs +++ b/frame/authorship/src/lib.rs @@ -27,7 +27,7 @@ use frame_support::{ inherent::{InherentData, ProvideInherent, InherentIdentifier}, }; use codec::{Encode, Decode}; -use sp_runtime::traits::{Header as HeaderT, One, Zero}; +use sp_runtime::traits::{Header as HeaderT, One, Saturating}; use sp_authorship::{INHERENT_IDENTIFIER, UnclesInherentData, InherentError}; const MAX_UNCLES: usize = 10; @@ -298,11 +298,7 @@ impl Pallet { let (minimum_height, maximum_height) = { let uncle_generations = T::UncleGenerations::get(); - let min = if now >= uncle_generations { - now - uncle_generations - } else { - Zero::zero() - }; + let min = now.saturating_sub(uncle_generations); (min, now) }; @@ -329,7 +325,7 @@ impl Pallet { return Err(Error::::OldUncle.into()); } - let duplicate = existing_uncles.into_iter().find(|h| **h == hash).is_some(); + let duplicate = existing_uncles.into_iter().any(|h| *h == hash); let in_chain = >::block_hash(uncle.number()) == hash; if duplicate || in_chain { @@ -341,15 +337,14 @@ impl Pallet { } fn prune_old_uncles(minimum_height: T::BlockNumber) { - let mut uncles = >::get(); + let uncles = >::get(); let prune_entries = uncles.iter().take_while(|item| match item { UncleEntryItem::Uncle(_, _) => true, UncleEntryItem::InclusionHeight(height) => height < &minimum_height, }); let prune_index = prune_entries.count(); - let _ = uncles.drain(..prune_index); - >::put(uncles); + >::put(&uncles[prune_index..]); } } From f4cccc08110bd1c516f2dcc6fe86682cb6118184 Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Mon, 14 Jun 2021 09:16:14 +0200 Subject: [PATCH 45/61] fix ordering of staking weight arguments (#9063) Closes #9054. 
--- frame/staking/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 0a22f31e6c3f5..30c2a160e9e72 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -2738,8 +2738,8 @@ impl frame_election_provider_support::ElectionDataProvider>::iter().count(); let weight = T::WeightInfo::get_npos_voters( - nominator_count as u32, validator_count as u32, + nominator_count as u32, slashing_span_count as u32, ); Ok((Self::get_npos_voters(), weight)) From a7b641fc25d8d157b2dddccb7f459c4f166596e2 Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Mon, 14 Jun 2021 03:07:09 -0700 Subject: [PATCH 46/61] Allow renaming storage item prefixes (#9016) * Implement parsing for #[pallet::storage_name] on storage items * Rename storage prefix when a #[pallet::storage_name] is supplied * Fix test_storage_info * Rename storage_name to storage_prefix * Check for duplicates when renaming storage prefixes * Allow only string literals for storage_prefix renames * Use proper spans for attribute errors * Check for valid identifiers when parsing storage prefix renames --- .../procedural/src/pallet/expand/storage.rs | 44 +++++++-- .../procedural/src/pallet/parse/storage.rs | 97 ++++++++++++++++--- frame/support/test/tests/pallet.rs | 20 ++++ .../pallet_ui/duplicate_storage_prefix.rs | 21 ++++ .../pallet_ui/duplicate_storage_prefix.stderr | 17 ++++ .../pallet_ui/storage_invalid_attribute.rs | 21 ++++ .../storage_invalid_attribute.stderr | 5 + .../pallet_ui/storage_invalid_rename_value.rs | 18 ++++ .../storage_invalid_rename_value.stderr | 5 + .../pallet_ui/storage_multiple_getters.rs | 25 +++++ .../pallet_ui/storage_multiple_getters.stderr | 5 + .../pallet_ui/storage_multiple_renames.rs | 25 +++++ .../pallet_ui/storage_multiple_renames.stderr | 5 + 13 files changed, 288 insertions(+), 20 deletions(-) create mode 100644 frame/support/test/tests/pallet_ui/duplicate_storage_prefix.rs create mode 100644 
frame/support/test/tests/pallet_ui/duplicate_storage_prefix.stderr create mode 100644 frame/support/test/tests/pallet_ui/storage_invalid_attribute.rs create mode 100644 frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr create mode 100644 frame/support/test/tests/pallet_ui/storage_invalid_rename_value.rs create mode 100644 frame/support/test/tests/pallet_ui/storage_invalid_rename_value.stderr create mode 100644 frame/support/test/tests/pallet_ui/storage_multiple_getters.rs create mode 100644 frame/support/test/tests/pallet_ui/storage_multiple_getters.stderr create mode 100644 frame/support/test/tests/pallet_ui/storage_multiple_renames.rs create mode 100644 frame/support/test/tests/pallet_ui/storage_multiple_renames.stderr diff --git a/frame/support/procedural/src/pallet/expand/storage.rs b/frame/support/procedural/src/pallet/expand/storage.rs index c956425379c53..0000051dd9b94 100644 --- a/frame/support/procedural/src/pallet/expand/storage.rs +++ b/frame/support/procedural/src/pallet/expand/storage.rs @@ -15,22 +15,48 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::pallet::Def; +use crate::pallet::{Def, parse::storage::StorageDef}; use crate::pallet::parse::storage::{Metadata, QueryKind, StorageGenerics}; use frame_support_procedural_tools::clean_type_string; +use std::collections::HashSet; /// Generate the prefix_ident related the the storage. /// prefix_ident is used for the prefix struct to be given to storage as first generic param. -fn prefix_ident(storage_ident: &syn::Ident) -> syn::Ident { +fn prefix_ident(storage: &StorageDef) -> syn::Ident { + let storage_ident = &storage.ident; syn::Ident::new(&format!("_GeneratedPrefixForStorage{}", storage_ident), storage_ident.span()) } +/// Check for duplicated storage prefixes. 
This step is necessary since users can specify an +/// alternative storage prefix using the #[pallet::storage_prefix] syntax, and we need to ensure +/// that the prefix specified by the user is not a duplicate of an existing one. +fn check_prefix_duplicates( + storage_def: &StorageDef, + set: &mut HashSet, +) -> syn::Result<()> { + let prefix = storage_def.prefix(); + + if !set.insert(prefix.clone()) { + let err = syn::Error::new( + storage_def.prefix_span(), + format!("Duplicate storage prefixes found for `{}`", prefix), + ); + return Err(err); + } + + Ok(()) +} + /// * if generics are unnamed: replace the first generic `_` by the generated prefix structure /// * if generics are named: reorder the generic, remove their name, and add the missing ones. /// * Add `#[allow(type_alias_bounds)]` -pub fn process_generics(def: &mut Def) { +pub fn process_generics(def: &mut Def) -> syn::Result<()> { let frame_support = &def.frame_support; + let mut prefix_set = HashSet::new(); + for storage_def in def.storages.iter_mut() { + check_prefix_duplicates(storage_def, &mut prefix_set)?; + let item = &mut def.item.content.as_mut().expect("Checked by def").1[storage_def.index]; let typ_item = match item { @@ -50,7 +76,7 @@ pub fn process_generics(def: &mut Def) { _ => unreachable!("Checked by def"), }; - let prefix_ident = prefix_ident(&storage_def.ident); + let prefix_ident = prefix_ident(&storage_def); let type_use_gen = if def.config.has_instance { quote::quote_spanned!(storage_def.attr_span => T, I) } else { @@ -116,6 +142,8 @@ pub fn process_generics(def: &mut Def) { args.args[0] = syn::parse_quote!( #prefix_ident<#type_use_gen> ); } } + + Ok(()) } /// * generate StoragePrefix structs (e.g. 
for a storage `MyStorage` a struct with the name @@ -125,7 +153,9 @@ pub fn process_generics(def: &mut Def) { /// * Add `#[allow(type_alias_bounds)]` on storages type alias /// * generate metadatas pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { - process_generics(def); + if let Err(e) = process_generics(def) { + return e.into_compile_error().into(); + } let frame_support = &def.frame_support; let frame_system = &def.frame_system; @@ -344,9 +374,9 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { let prefix_structs = def.storages.iter().map(|storage_def| { let type_impl_gen = &def.type_impl_generics(storage_def.attr_span); let type_use_gen = &def.type_use_generics(storage_def.attr_span); - let prefix_struct_ident = prefix_ident(&storage_def.ident); + let prefix_struct_ident = prefix_ident(&storage_def); let prefix_struct_vis = &storage_def.vis; - let prefix_struct_const = storage_def.ident.to_string(); + let prefix_struct_const = storage_def.prefix(); let config_where_clause = &def.config.where_clause; let cfg_attrs = &storage_def.cfg_attrs; diff --git a/frame/support/procedural/src/pallet/parse/storage.rs b/frame/support/procedural/src/pallet/parse/storage.rs index 6b842ab7fa401..9ec890e66e57a 100644 --- a/frame/support/procedural/src/pallet/parse/storage.rs +++ b/frame/support/procedural/src/pallet/parse/storage.rs @@ -25,28 +25,60 @@ mod keyword { syn::custom_keyword!(Error); syn::custom_keyword!(pallet); syn::custom_keyword!(getter); + syn::custom_keyword!(storage_prefix); syn::custom_keyword!(OptionQuery); syn::custom_keyword!(ValueQuery); } -/// Parse for `#[pallet::getter(fn dummy)]` -pub struct PalletStorageAttr { - getter: syn::Ident, +/// Parse for one of the following: +/// * `#[pallet::getter(fn dummy)]` +/// * `#[pallet::storage_prefix = "CustomName"]` +pub enum PalletStorageAttr { + Getter(syn::Ident, proc_macro2::Span), + StorageName(syn::LitStr, proc_macro2::Span), +} + +impl PalletStorageAttr { + fn 
attr_span(&self) -> proc_macro2::Span { + match self { + Self::Getter(_, span) | Self::StorageName(_, span) => *span, + } + } } impl syn::parse::Parse for PalletStorageAttr { fn parse(input: syn::parse::ParseStream) -> syn::Result { input.parse::()?; + let attr_span = input.span(); let content; syn::bracketed!(content in input); content.parse::()?; content.parse::()?; - content.parse::()?; - let generate_content; - syn::parenthesized!(generate_content in content); - generate_content.parse::()?; - Ok(Self { getter: generate_content.parse::()? }) + let lookahead = content.lookahead1(); + if lookahead.peek(keyword::getter) { + content.parse::()?; + + let generate_content; + syn::parenthesized!(generate_content in content); + generate_content.parse::()?; + Ok(Self::Getter(generate_content.parse::()?, attr_span)) + } else if lookahead.peek(keyword::storage_prefix) { + content.parse::()?; + content.parse::()?; + + let renamed_prefix = content.parse::()?; + // Ensure the renamed prefix is a proper Rust identifier + syn::parse_str::(&renamed_prefix.value()) + .map_err(|_| { + let msg = format!("`{}` is not a valid identifier", renamed_prefix.value()); + syn::Error::new(renamed_prefix.span(), msg) + })?; + + Ok(Self::StorageName(renamed_prefix, attr_span)) + } else { + Err(lookahead.error()) + } } } @@ -89,6 +121,8 @@ pub struct StorageDef { pub instances: Vec, /// Optional getter to generate. If some then query_kind is ensured to be some as well. pub getter: Option, + /// Optional expression that evaluates to a type that can be used as StoragePrefix instead of ident. + pub rename_as: Option, /// Whereas the querytype of the storage is OptionQuery or ValueQuery. /// Note that this is best effort as it can't be determined when QueryKind is generic, and /// result can be false if user do some unexpected type alias. 
@@ -105,7 +139,6 @@ pub struct StorageDef { pub named_generics: Option, } - /// The parsed generic from the #[derive(Clone)] pub enum StorageGenerics { @@ -541,6 +574,25 @@ fn extract_key(ty: &syn::Type) -> syn::Result { } impl StorageDef { + /// Return the storage prefix for this storage item + pub fn prefix(&self) -> String { + self + .rename_as + .as_ref() + .map(syn::LitStr::value) + .unwrap_or(self.ident.to_string()) + } + + /// Return either the span of the ident or the span of the literal in the + /// #[storage_prefix] attribute + pub fn prefix_span(&self) -> proc_macro2::Span { + self + .rename_as + .as_ref() + .map(syn::LitStr::span) + .unwrap_or(self.ident.span()) + } + pub fn try_from( attr_span: proc_macro2::Span, index: usize, @@ -552,12 +604,30 @@ impl StorageDef { return Err(syn::Error::new(item.span(), "Invalid pallet::storage, expect item type.")); }; - let mut attrs: Vec = helper::take_item_pallet_attrs(&mut item.attrs)?; - if attrs.len() > 1 { + let attrs: Vec = helper::take_item_pallet_attrs(&mut item.attrs)?; + let (mut getters, mut names) = attrs + .into_iter() + .partition::, _>(|attr| matches!(attr, PalletStorageAttr::Getter(..))); + if getters.len() > 1 { let msg = "Invalid pallet::storage, multiple argument pallet::getter found"; - return Err(syn::Error::new(attrs[1].getter.span(), msg)); + return Err(syn::Error::new(getters[1].attr_span(), msg)); } - let getter = attrs.pop().map(|attr| attr.getter); + if names.len() > 1 { + let msg = "Invalid pallet::storage, multiple argument pallet::storage_prefix found"; + return Err(syn::Error::new(names[1].attr_span(), msg)); + } + let getter = getters.pop().map(|attr| { + match attr { + PalletStorageAttr::Getter(ident, _) => ident, + _ => unreachable!(), + } + }); + let rename_as = names.pop().map(|attr| { + match attr { + PalletStorageAttr::StorageName(lit, _) => lit, + _ => unreachable!(), + } + }); let cfg_attrs = helper::get_item_cfg_attrs(&item.attrs); @@ -609,6 +679,7 @@ impl StorageDef { 
metadata, docs, getter, + rename_as, query_kind, where_clause, cfg_attrs, diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index a79c25ae8f3e3..412622b3b194d 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -198,6 +198,10 @@ pub mod pallet { #[pallet::storage] pub type Value = StorageValue; + #[pallet::storage] + #[pallet::storage_prefix = "Value2"] + pub type RenamedValue = StorageValue; + #[pallet::type_value] pub fn MyDefault() -> u16 where T::AccountId: From + From + SomeAssociation1 @@ -577,6 +581,10 @@ fn storage_expand() { let k = [twox_128(b"Example"), twox_128(b"Value")].concat(); assert_eq!(unhashed::get::(&k), Some(1u32)); + pallet::RenamedValue::::put(2); + let k = [twox_128(b"Example"), twox_128(b"Value2")].concat(); + assert_eq!(unhashed::get::(&k), Some(2)); + pallet::Map::::insert(1, 2); let mut k = [twox_128(b"Example"), twox_128(b"Map")].concat(); k.extend(1u8.using_encoded(blake2_128_concat)); @@ -697,6 +705,13 @@ fn metadata() { default: DecodeDifferent::Decoded(vec![0]), documentation: DecodeDifferent::Decoded(vec![]), }, + StorageEntryMetadata { + name: DecodeDifferent::Decoded("Value2".to_string()), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(DecodeDifferent::Decoded("u64".to_string())), + default: DecodeDifferent::Decoded(vec![0]), + documentation: DecodeDifferent::Decoded(vec![]), + }, StorageEntryMetadata { name: DecodeDifferent::Decoded("Map".to_string()), modifier: StorageEntryModifier::Default, @@ -993,6 +1008,11 @@ fn test_storage_info() { max_values: Some(1), max_size: Some(4), }, + StorageInfo { + prefix: prefix(b"Example", b"Value2"), + max_values: Some(1), + max_size: Some(8), + }, StorageInfo { prefix: prefix(b"Example", b"Map"), max_values: None, diff --git a/frame/support/test/tests/pallet_ui/duplicate_storage_prefix.rs b/frame/support/test/tests/pallet_ui/duplicate_storage_prefix.rs new file mode 100644 index 
0000000000000..d103fa09d991b --- /dev/null +++ b/frame/support/test/tests/pallet_ui/duplicate_storage_prefix.rs @@ -0,0 +1,21 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::StorageValue; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + #[pallet::generate_store(trait Store)] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::storage] + type Foo = StorageValue<_, u8>; + + #[pallet::storage] + #[pallet::storage_prefix = "Foo"] + type NotFoo = StorageValue<_, u16>; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/duplicate_storage_prefix.stderr b/frame/support/test/tests/pallet_ui/duplicate_storage_prefix.stderr new file mode 100644 index 0000000000000..63a6e71e44045 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/duplicate_storage_prefix.stderr @@ -0,0 +1,17 @@ +error: Duplicate storage prefixes found for `Foo` + --> $DIR/duplicate_storage_prefix.rs:16:32 + | +16 | #[pallet::storage_prefix = "Foo"] + | ^^^^^ + +error[E0412]: cannot find type `_GeneratedPrefixForStorageFoo` in this scope + --> $DIR/duplicate_storage_prefix.rs:13:7 + | +13 | type Foo = StorageValue<_, u8>; + | ^^^ not found in this scope + +error[E0121]: the type placeholder `_` is not allowed within types on item signatures + --> $DIR/duplicate_storage_prefix.rs:17:35 + | +17 | type NotFoo = StorageValue<_, u16>; + | ^ not allowed in type signatures diff --git a/frame/support/test/tests/pallet_ui/storage_invalid_attribute.rs b/frame/support/test/tests/pallet_ui/storage_invalid_attribute.rs new file mode 100644 index 0000000000000..c6a88c083135d --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_invalid_attribute.rs @@ -0,0 +1,21 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + 
#[pallet::call] + impl Pallet {} + + #[pallet::storage] + #[pallet::generate_store(pub trait Store)] + type Foo = StorageValue; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr b/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr new file mode 100644 index 0000000000000..bf93d99cf56bd --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr @@ -0,0 +1,5 @@ +error: expected `getter` or `storage_prefix` + --> $DIR/storage_invalid_attribute.rs:16:12 + | +16 | #[pallet::generate_store(pub trait Store)] + | ^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/storage_invalid_rename_value.rs b/frame/support/test/tests/pallet_ui/storage_invalid_rename_value.rs new file mode 100644 index 0000000000000..c3a08e05e2ac7 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_invalid_rename_value.rs @@ -0,0 +1,18 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::storage] + #[pallet::storage_prefix = "pub"] + type Foo = StorageValue<_, u8>; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_invalid_rename_value.stderr b/frame/support/test/tests/pallet_ui/storage_invalid_rename_value.stderr new file mode 100644 index 0000000000000..513970f98a4f7 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_invalid_rename_value.stderr @@ -0,0 +1,5 @@ +error: `pub` is not a valid identifier + --> $DIR/storage_invalid_rename_value.rs:13:29 + | +13 | #[pallet::storage_prefix = "pub"] + | ^^^^^ diff --git a/frame/support/test/tests/pallet_ui/storage_multiple_getters.rs b/frame/support/test/tests/pallet_ui/storage_multiple_getters.rs new file mode 100644 index 0000000000000..309b9b24136fa --- /dev/null +++ 
b/frame/support/test/tests/pallet_ui/storage_multiple_getters.rs @@ -0,0 +1,25 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::storage] + #[pallet::getter(fn get_foo)] + #[pallet::getter(fn foo_error)] + type Foo = StorageValue<_, u8>; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_multiple_getters.stderr b/frame/support/test/tests/pallet_ui/storage_multiple_getters.stderr new file mode 100644 index 0000000000000..188eed3cb0d17 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_multiple_getters.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::storage, multiple argument pallet::getter found + --> $DIR/storage_multiple_getters.rs:20:3 + | +20 | #[pallet::getter(fn foo_error)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/storage_multiple_renames.rs b/frame/support/test/tests/pallet_ui/storage_multiple_renames.rs new file mode 100644 index 0000000000000..f3caef80a7ee2 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_multiple_renames.rs @@ -0,0 +1,25 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::storage] + #[pallet::storage_prefix = "Bar"] + #[pallet::storage_prefix = "Baz"] + type Foo = StorageValue<_, u8>; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_multiple_renames.stderr 
b/frame/support/test/tests/pallet_ui/storage_multiple_renames.stderr new file mode 100644 index 0000000000000..9288d131d95af --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_multiple_renames.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::storage, multiple argument pallet::storage_prefix found + --> $DIR/storage_multiple_renames.rs:20:3 + | +20 | #[pallet::storage_prefix = "Baz"] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ From a4b0fd8fa5939879e5a3f9a1a009323ccb4d4a30 Mon Sep 17 00:00:00 2001 From: Qinxuan Chen Date: Mon, 14 Jun 2021 19:00:32 +0800 Subject: [PATCH 47/61] Migrate pallet-randomness-collective-flip to pallet attribute macro (#9061) * migrate pallet-randomness-collective-flip to pallet attribute macro Signed-off-by: koushiro * fix some nits Signed-off-by: koushiro * remove some spacing things Signed-off-by: koushiro * remove space Signed-off-by: koushiro * use tabs Signed-off-by: koushiro --- Cargo.lock | 1 - bin/node-template/runtime/src/lib.rs | 2 + bin/node/runtime/src/lib.rs | 2 + frame/contracts/src/tests.rs | 1 + frame/randomness-collective-flip/Cargo.toml | 8 +- frame/randomness-collective-flip/src/lib.rs | 86 ++++++++++++++------- 6 files changed, 65 insertions(+), 35 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a52f4250b5a64..17651bf4b3a55 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5366,7 +5366,6 @@ dependencies = [ "frame-system", "parity-scale-codec", "safe-mix", - "serde", "sp-core", "sp-io", "sp-runtime", diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index e51a190ae9a0d..f98517b91d24c 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -202,6 +202,8 @@ impl frame_system::Config for Runtime { type OnSetCode = (); } +impl pallet_randomness_collective_flip::Config for Runtime {} + impl pallet_aura::Config for Runtime { type AuthorityId = AuraId; } diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 
13189b1ff898a..2665607cc42fa 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -217,6 +217,8 @@ impl frame_system::Config for Runtime { type OnSetCode = (); } +impl pallet_randomness_collective_flip::Config for Runtime {} + impl pallet_utility::Config for Runtime { type Event = Event; type Call = Call; diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index e066a369af0be..3e687643cdc8a 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -235,6 +235,7 @@ impl frame_system::Config for Test { type SS58Prefix = (); type OnSetCode = (); } +impl pallet_randomness_collective_flip::Config for Test {} impl pallet_balances::Config for Test { type MaxLocks = (); type MaxReserves = (); diff --git a/frame/randomness-collective-flip/Cargo.toml b/frame/randomness-collective-flip/Cargo.toml index ad9bcb97837db..5ae350ffcac11 100644 --- a/frame/randomness-collective-flip/Cargo.toml +++ b/frame/randomness-collective-flip/Cargo.toml @@ -16,23 +16,23 @@ targets = ["x86_64-unknown-linux-gnu"] safe-mix = { version = "1.0", default-features = false } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } + frame-support = { version = "3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } -sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] sp-core = { version = "3.0.0", path = "../../primitives/core" } sp-io = { version = "3.0.0", path = "../../primitives/io" } -serde = { version = "1.0.101" } [features] default = ["std"] std = [ "safe-mix/std", - "frame-system/std", "codec/std", - "frame-support/std", "sp-runtime/std", "sp-std/std", + 
"frame-system/std", + "frame-support/std", ] try-runtime = ["frame-support/try-runtime"] diff --git a/frame/randomness-collective-flip/src/lib.rs b/frame/randomness-collective-flip/src/lib.rs index 724605c6238b6..3285addc5bf48 100644 --- a/frame/randomness-collective-flip/src/lib.rs +++ b/frame/randomness-collective-flip/src/lib.rs @@ -37,33 +37,41 @@ //! ### Example - Get random seed for the current block //! //! ``` -//! use frame_support::{decl_module, dispatch, traits::Randomness}; +//! use frame_support::traits::Randomness; //! -//! pub trait Config: frame_system::Config {} +//! #[frame_support::pallet] +//! pub mod pallet { +//! use frame_support::pallet_prelude::*; +//! use frame_system::pallet_prelude::*; +//! use super::*; //! -//! decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { -//! #[weight = 0] -//! pub fn random_module_example(origin) -> dispatch::DispatchResult { -//! let _random_value = >::random(&b"my context"[..]); -//! Ok(()) -//! } -//! } +//! #[pallet::pallet] +//! #[pallet::generate_store(pub(super) trait Store)] +//! pub struct Pallet(_); +//! +//! #[pallet::config] +//! pub trait Config: frame_system::Config + pallet_randomness_collective_flip::Config {} +//! +//! #[pallet::call] +//! impl Pallet { +//! #[pallet::weight(0)] +//! pub fn random_module_example(origin: OriginFor) -> DispatchResult { +//! let _random_value = >::random(&b"my context"[..]); +//! Ok(()) +//! } +//! } //! } //! # fn main() { } //! 
``` #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::{prelude::*, convert::TryInto}; -use sp_runtime::traits::{Hash, Saturating}; -use frame_support::{ - decl_module, decl_storage, traits::Randomness, - weights::Weight -}; use safe_mix::TripletMix; + use codec::Encode; -use frame_system::Config; +use sp_std::{prelude::*, convert::TryInto}; +use sp_runtime::traits::{Hash, Saturating}; +use frame_support::traits::Randomness; const RANDOM_MATERIAL_LEN: u32 = 81; @@ -73,8 +81,23 @@ fn block_number_to_index(block_number: T::BlockNumber) -> usize { index.try_into().ok().expect("Something % 81 is always smaller than usize; qed") } -decl_module! { - pub struct Module for enum Call where origin: T::Origin { +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use super::*; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::hooks] + impl Hooks> for Pallet { fn on_initialize(block_number: T::BlockNumber) -> Weight { let parent_hash = >::parent_hash(); @@ -85,21 +108,20 @@ decl_module! { values[index] = parent_hash; }); - 0 + T::DbWeight::get().reads_writes(1, 1) } } -} -decl_storage! { - trait Store for Module as RandomnessCollectiveFlip { - /// Series of block headers from the last 81 blocks that acts as random seed material. This - /// is arranged as a ring buffer with `block_number % 81` being the index into the `Vec` of - /// the oldest hash. - RandomMaterial get(fn random_material): Vec; - } + /// Series of block headers from the last 81 blocks that acts as random seed material. This + /// is arranged as a ring buffer with `block_number % 81` being the index into the `Vec` of + /// the oldest hash. 
+ #[pallet::storage] + #[pallet::getter(fn random_material)] + pub(super) type RandomMaterial = + StorageValue<_, Vec, ValueQuery>; } -impl Randomness for Module { +impl Randomness for Pallet { /// This randomness uses a low-influence function, drawing upon the block hashes from the /// previous 81 blocks. Its result for any given subject will be known far in advance by anyone /// observing the chain. Any block producer has significant influence over their block hashes @@ -140,13 +162,15 @@ impl Randomness for Module { mod tests { use crate as pallet_randomness_collective_flip; use super::*; + use sp_core::H256; use sp_runtime::{ testing::Header, traits::{BlakeTwo256, Header as _, IdentityLookup}, }; - use frame_system::limits; + use frame_support::{parameter_types, traits::{Randomness, OnInitialize}}; + use frame_system::limits; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -196,6 +220,8 @@ mod tests { type OnSetCode = (); } + impl pallet_randomness_collective_flip::Config for Test {} + fn new_test_ext() -> sp_io::TestExternalities { let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); t.into() From ea960d6c5350f381f4748b628079691794055610 Mon Sep 17 00:00:00 2001 From: h4x3rotab Date: Mon, 14 Jun 2021 19:22:39 +0800 Subject: [PATCH 48/61] Improve construct_runtime doc (#9096) - Mention when the pallet definition parts are needed - Rename "module" to "pallet" --- frame/support/procedural/src/lib.rs | 45 +++++++++++++++-------------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/frame/support/procedural/src/lib.rs b/frame/support/procedural/src/lib.rs index 23cb557e6dd7c..d3ddd2360b31f 100644 --- a/frame/support/procedural/src/lib.rs +++ b/frame/support/procedural/src/lib.rs @@ -256,10 +256,10 @@ pub fn decl_storage(input: TokenStream) -> TokenStream { storage::decl_storage_impl(input) } -/// Construct a runtime, with the given name and the given 
modules. +/// Construct a runtime, with the given name and the given pallets. /// /// The parameters here are specific types for `Block`, `NodeBlock`, and `UncheckedExtrinsic` -/// and the modules that are used by the runtime. +/// and the pallets that are used by the runtime. /// `Block` is the block type that is used in the runtime and `NodeBlock` is the block type /// that is used in the node. For instance they can differ in the extrinsics type. /// @@ -276,7 +276,7 @@ pub fn decl_storage(input: TokenStream) -> TokenStream { /// Test: test::{Pallet, Call} = 1, /// Test2: test_with_long_module::{Pallet, Event}, /// -/// // Module with instances +/// // Pallets with instances /// Test3_Instance1: test3::::{Pallet, Call, Storage, Event, Config, Origin}, /// Test3_DefaultInstance: test3::{Pallet, Call, Storage, Event, Config, Origin} = 4, /// } @@ -284,38 +284,39 @@ pub fn decl_storage(input: TokenStream) -> TokenStream { /// ``` /// /// The identifier `System` is the name of the pallet and the lower case identifier `system` is the -/// name of the Rust module/crate for this Substrate module. The identifiers between the braces are -/// the module parts provided by the pallet. It is important to list these parts here to export +/// name of the Rust module/crate for this Substrate pallet. The identifiers between the braces are +/// the pallet parts provided by the pallet. It is important to list these parts here to export /// them correctly in the metadata or to make the pallet usable in the runtime. /// /// We provide support for the following module parts in a pallet: /// -/// - `Module` -/// - `Call` -/// - `Storage` -/// - `Event` or `Event` (if the event is generic) -/// - `Origin` or `Origin` (if the origin is generic) -/// - `Config` or `Config` (if the config is generic) -/// - `Inherent` - If the module provides/can check inherents. -/// - `ValidateUnsigned` - If the module validates unsigned extrinsics. 
-/// -/// `= $n` is an optional part allowing to define at which index the module variants in +/// - `Pallet` - Required for all pallets +/// - `Call` - If the pallet has callable functions +/// - `Storage` - If the pallet uses storage +/// - `Event` or `Event` (if the event is generic) - If the pallet emits events +/// - `Origin` or `Origin` (if the origin is generic) - If the pallet has instanciable origins +/// - `Config` or `Config` (if the config is generic) - If the pallet builds the genesis storage +/// with `GenesisConfig` +/// - `Inherent` - If the pallet provides/can check inherents. +/// - `ValidateUnsigned` - If the pallet validates unsigned extrinsics. +/// +/// `= $n` is an optional part allowing to define at which index the pallet variants in /// `OriginCaller`, `Call` and `Event` are encoded, and to define the ModuleToIndex value. /// /// if `= $n` is not given, then index is resolved same as fieldless enum in Rust /// (i.e. incrementedly from previous index): /// ```nocompile -/// module1 .. = 2, -/// module2 .., // Here module2 is given index 3 -/// module3 .. = 0, -/// module4 .., // Here module4 is given index 1 +/// pallet1 .. = 2, +/// pallet2 .., // Here pallet2 is given index 3 +/// pallet3 .. = 0, +/// pallet4 .., // Here pallet4 is given index 1 /// ``` /// /// # Note /// -/// The population of the genesis storage depends on the order of modules. So, if one of your -/// modules depends on another module, the module that is depended upon needs to come before -/// the module depending on it. +/// The population of the genesis storage depends on the order of pallets. So, if one of your +/// pallets depends on another pallet, the pallet that is depended upon needs to come before +/// the pallet depending on it. 
/// /// # Type definitions /// From c666a251691300c1651075a3b59ba1cf59c5a664 Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Mon, 14 Jun 2021 16:02:45 +0200 Subject: [PATCH 49/61] staking/election: prolonged era and emergency mode for governance submission. (#8912) * Implementation but weird initial era in tests * Emergency mode for elections. (#8918) * do some testing, some logging. * some testing apparatus * genesis election provider (#8970) * genesis election provider * fix historical stuff * Fix test * remove dbg * Apply suggestions from code review Co-authored-by: Zeke Mostov <32168567+emostov@users.noreply.github.com> Co-authored-by: Peter Goodspeed-Niklaus * capitalize comment and name without conflict * fix log * Update frame/election-provider-multi-phase/src/lib.rs * Update frame/election-provider-multi-phase/src/lib.rs Co-authored-by: Peter Goodspeed-Niklaus * apply suggestion on tests * remove testing modifications * Apply suggestions from code review Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Dmitry Kashitsyn * apply suggestion * fix master merge Co-authored-by: kianenigma Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Zeke Mostov <32168567+emostov@users.noreply.github.com> Co-authored-by: Peter Goodspeed-Niklaus Co-authored-by: Dmitry Kashitsyn --- Cargo.lock | 2 + bin/node/runtime/Cargo.toml | 1 + bin/node/runtime/src/lib.rs | 5 +- frame/babe/src/mock.rs | 1 + .../election-provider-multi-phase/src/lib.rs | 184 ++++++++---- frame/grandpa/src/mock.rs | 1 + frame/offences/benchmarking/src/mock.rs | 1 + frame/session/Cargo.toml | 2 + frame/session/benchmarking/src/mock.rs | 1 + frame/session/src/historical/mod.rs | 43 ++- frame/session/src/lib.rs | 27 +- frame/staking/src/benchmarking.rs | 7 +- frame/staking/src/lib.rs | 284 ++++++++++++------ frame/staking/src/mock.rs | 1 + frame/staking/src/tests.rs | 51 +++- 15 files changed, 420 insertions(+), 191 
deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 17651bf4b3a55..1abbfd3947077 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4360,6 +4360,7 @@ name = "node-runtime" version = "2.0.1" dependencies = [ "frame-benchmarking", + "frame-election-provider-support", "frame-executive", "frame-support", "frame-system", @@ -5425,6 +5426,7 @@ dependencies = [ "frame-system", "impl-trait-for-tuples", "lazy_static", + "log", "pallet-timestamp", "parity-scale-codec", "sp-application-crypto", diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index e57944674fcc4..9b182c4085790 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -43,6 +43,7 @@ frame-benchmarking = { version = "3.1.0", default-features = false, path = "../. frame-support = { version = "3.0.0", default-features = false, path = "../../../frame/support" } frame-system = { version = "3.0.0", default-features = false, path = "../../../frame/system" } frame-system-benchmarking = { version = "3.0.0", default-features = false, path = "../../../frame/system/benchmarking", optional = true } +frame-election-provider-support = { version = "3.0.0", default-features = false, path = "../../../frame/election-provider-support" } frame-system-rpc-runtime-api = { version = "3.0.0", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } frame-try-runtime = { version = "0.9.0", default-features = false, path = "../../../frame/try-runtime", optional = true } pallet-assets = { version = "3.0.0", default-features = false, path = "../../../frame/assets" } diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 2665607cc42fa..3e8053ac4f1bb 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -482,6 +482,7 @@ parameter_types! 
{ pub OffchainRepeat: BlockNumber = 5; } +use frame_election_provider_support::onchain; impl pallet_staking::Config for Runtime { const MAX_NOMINATIONS: u32 = MAX_NOMINATIONS; type Currency = Balances; @@ -505,6 +506,8 @@ impl pallet_staking::Config for Runtime { type NextNewSession = Session; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; type ElectionProvider = ElectionProviderMultiPhase; + type GenesisElectionProvider = + onchain::OnChainSequentialPhragmen>; type WeightInfo = pallet_staking::weights::SubstrateWeight; } @@ -515,7 +518,7 @@ parameter_types! { // fallback: no need to do on-chain phragmen initially. pub const Fallback: pallet_election_provider_multi_phase::FallbackStrategy = - pallet_election_provider_multi_phase::FallbackStrategy::OnChain; + pallet_election_provider_multi_phase::FallbackStrategy::Nothing; pub SolutionImprovementThreshold: Perbill = Perbill::from_rational(1u32, 10_000); diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index 236b975817ffd..770e20cb786e2 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -213,6 +213,7 @@ impl pallet_staking::Config for Test { type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; type NextNewSession = Session; type ElectionProvider = onchain::OnChainSequentialPhragmen; + type GenesisElectionProvider = Self::ElectionProvider; type WeightInfo = (); } diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 0254525ce819d..2bb47a8778074 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -115,7 +115,23 @@ //! good solution is queued, then the fallback strategy [`pallet::Config::Fallback`] is used to //! determine what needs to be done. The on-chain election is slow, and contains no balancing or //! reduction post-processing. See [`onchain::OnChainSequentialPhragmen`]. The -//! 
[`FallbackStrategy::Nothing`] should probably only be used for testing, and returns an error. +//! [`FallbackStrategy::Nothing`] just returns an error, and enables the [`Phase::Emergency`]. +//! +//! ### Emergency Phase +//! +//! If, for any of the below reasons: +//! +//! 1. No signed or unsigned solution submitted & Fallback is `None` or failed +//! 2. Internal error +//! +//! A call to `T::ElectionProvider::elect` is made, and `Ok(_)` cannot be returned, then the pallet +//! proceeds to the [`Phase::Emergency`]. During this phase, any solution can be submitted from +//! [`T::ForceOrigin`], without any checking. Once submitted, the forced solution is kept in +//! [`QueuedSolution`] until the next call to `T::ElectionProvider::elect`, where it is returned and +//! [`Phase`] goes back to `Off`. +//! +//! This implies that the user of this pallet (i.e. a staking pallet) should re-try calling +//! `T::ElectionProvider::elect` in case of error until `OK(_)` is returned. //! //! ## Feasible Solution (correct solution) //! @@ -269,7 +285,7 @@ pub type CompactAccuracyOf = as CompactSolution>::Accuracy; pub type OnChainAccuracyOf = ::OnChainAccuracy; /// Wrapper type that implements the configurations needed for the on-chain backup. -struct OnChainConfig(sp_std::marker::PhantomData); +pub struct OnChainConfig(sp_std::marker::PhantomData); impl onchain::Config for OnChainConfig { type AccountId = T::AccountId; type BlockNumber = T::BlockNumber; @@ -312,9 +328,13 @@ pub enum Phase { /// advising validators not to bother running the unsigned offchain worker. /// /// As validator nodes are free to edit their OCW code, they could simply ignore this advisory - /// and always compute their own solution. However, by default, when the unsigned phase is passive, - /// the offchain workers will not bother running. + /// and always compute their own solution. However, by default, when the unsigned phase is + /// passive, the offchain workers will not bother running. 
Unsigned((bool, Bn)), + /// The emergency phase. This is enabled upon a failing call to `T::ElectionProvider::elect`. + /// After that, the only way to leave this phase is through a successful + /// `T::ElectionProvider::elect`. + Emergency, } impl Default for Phase { @@ -324,6 +344,11 @@ impl Default for Phase { } impl Phase { + /// Whether the phase is emergency or not. + pub fn is_emergency(&self) -> bool { + matches!(self, Phase::Emergency) + } + /// Whether the phase is signed or not. pub fn is_signed(&self) -> bool { matches!(self, Phase::Signed) @@ -582,7 +607,8 @@ pub mod pallet { /// Configuration for the fallback type Fallback: Get; - /// Origin that can set the minimum score. + /// Origin that can control this pallet. Note that any action taken by this origin (such) + /// as providing an emergency solution is not checked. Thus, it must be a trusted origin. type ForceOrigin: EnsureOrigin; /// The configuration of benchmarking. @@ -603,6 +629,13 @@ pub mod pallet { let remaining = next_election - now; let current_phase = Self::current_phase(); + log!( + trace, + "current phase {:?}, next election {:?}, metadata: {:?}", + current_phase, + next_election, + Self::snapshot_metadata() + ); match current_phase { Phase::Off if remaining <= signed_deadline && remaining > unsigned_deadline => { // NOTE: if signed-phase length is zero, second part of the if-condition fails. @@ -612,7 +645,7 @@ pub mod pallet { T::WeightInfo::on_initialize_open_signed().saturating_add(snap_weight) } Err(why) => { - // not much we can do about this at this point. + // Not much we can do about this at this point. log!(warn, "failed to open signed phase due to {:?}", why); T::WeightInfo::on_initialize_nothing() // NOTE: ^^ The trait specifies that this is a noop in terms of weight @@ -623,13 +656,13 @@ pub mod pallet { Phase::Signed | Phase::Off if remaining <= unsigned_deadline && remaining > Zero::zero() => { - // determine if followed by signed or not. 
+ // Determine if followed by signed or not. let (need_snapshot, enabled, signed_weight) = if current_phase == Phase::Signed { - // followed by a signed phase: close the signed phase, no need for snapshot. + // Followed by a signed phase: close the signed phase, no need for snapshot. // TODO: proper weight https://github.com/paritytech/substrate/pull/7910. (false, true, Weight::zero()) } else { - // no signed phase: create a new snapshot, definitely `enable` the unsigned + // No signed phase: create a new snapshot, definitely `enable` the unsigned // phase. (true, true, Weight::zero()) }; @@ -646,7 +679,7 @@ pub mod pallet { base_weight.saturating_add(snap_weight).saturating_add(signed_weight) } Err(why) => { - // not much we can do about this at this point. + // Not much we can do about this at this point. log!(warn, "failed to open unsigned phase due to {:?}", why); T::WeightInfo::on_initialize_nothing() // NOTE: ^^ The trait specifies that this is a noop in terms of weight @@ -661,7 +694,7 @@ pub mod pallet { fn offchain_worker(now: T::BlockNumber) { use sp_runtime::offchain::storage_lock::{StorageLock, BlockAndTime}; - // create a lock with the maximum deadline of number of blocks in the unsigned phase. + // Create a lock with the maximum deadline of number of blocks in the unsigned phase. // This should only come useful in an **abrupt** termination of execution, otherwise the // guard will be dropped upon successful execution. let mut lock = StorageLock::>>::with_block_deadline( @@ -687,7 +720,7 @@ pub mod pallet { assert!(size_of::>() <= size_of::()); // ---------------------------- - // based on the requirements of [`sp_npos_elections::Assignment::try_normalize`]. + // Based on the requirements of [`sp_npos_elections::Assignment::try_normalize`]. let max_vote: usize = as CompactSolution>::LIMIT; // 1. Maximum sum of [ChainAccuracy; 16] must fit into `UpperOf`.. 
@@ -761,7 +794,7 @@ pub mod pallet { // Check score being an improvement, phase, and desired targets. Self::unsigned_pre_dispatch_checks(&solution).expect(error_message); - // ensure witness was correct. + // Ensure witness was correct. let SolutionOrSnapshotSize { voters, targets } = Self::snapshot_metadata().expect(error_message); @@ -772,7 +805,7 @@ pub mod pallet { let ready = Self::feasibility_check(solution, ElectionCompute::Unsigned).expect(error_message); - // store the newly received solution. + // Store the newly received solution. log!(info, "queued unsigned solution with score {:?}", ready.score); >::put(ready); Self::deposit_event(Event::SolutionStored(ElectionCompute::Unsigned)); @@ -794,6 +827,29 @@ pub mod pallet { >::set(maybe_next_score); Ok(()) } + + /// Set a solution in the queue, to be handed out to the client of this pallet in the next + /// call to `ElectionProvider::elect`. + /// + /// This can only be set by `T::ForceOrigin`, and only when the phase is `Emergency`. + /// + /// The solution is not checked for any feasibility and is assumed to be trustworthy, as any + /// feasibility check itself can in principle cause the election process to fail (due to + /// memory/weight constrains). + #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] + pub fn set_emergency_election_result( + origin: OriginFor, + solution: ReadySolution, + ) -> DispatchResult { + T::ForceOrigin::ensure_origin(origin)?; + ensure!(Self::current_phase().is_emergency(), >::CallNotAllowed); + + // Note: we don't `rotate_round` at this point; the next call to + // `ElectionProvider::elect` will succeed and take care of that. + + >::put(solution); + Ok(()) + } } #[pallet::event] @@ -829,6 +885,8 @@ pub mod pallet { PreDispatchWeakSubmission, /// OCW submitted solution for wrong round OcwCallWrongEra, + /// The call is not allowed at this point. 
+ CallNotAllowed, } #[pallet::origin] @@ -838,7 +896,7 @@ pub mod pallet { type Call = Call; fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { if let Call::submit_unsigned(solution, _) = call { - // discard solution not coming from the local OCW. + // Discard solution not coming from the local OCW. match source { TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ } _ => { @@ -860,10 +918,10 @@ pub mod pallet { solution.score[0].saturated_into() ), ) - // used to deduplicate unsigned solutions: each validator should produce one + // Used to deduplicate unsigned solutions: each validator should produce one // solution per round at most, and solutions are not propagate. .and_provides(solution.round) - // transaction should stay in the pool for the duration of the unsigned phase. + // Transaction should stay in the pool for the duration of the unsigned phase. .longevity(T::UnsignedPhase::get().saturated_into::()) // We don't propagate this. This can never be validated at a remote node. .propagate(false) @@ -950,14 +1008,14 @@ impl Pallet { log!(trace, "lock for offchain worker acquired."); match Self::current_phase() { Phase::Unsigned((true, opened)) if opened == now => { - // mine a new solution, cache it, and attempt to submit it + // Mine a new solution, cache it, and attempt to submit it let initial_output = Self::ensure_offchain_repeat_frequency(now).and_then(|_| { Self::mine_check_save_submit() }); log!(debug, "initial offchain thread output: {:?}", initial_output); } Phase::Unsigned((true, opened)) if opened < now => { - // try and resubmit the cached solution, and recompute ONLY if it is not + // Try and resubmit the cached solution, and recompute ONLY if it is not // feasible. 
let resubmit_output = Self::ensure_offchain_repeat_frequency(now).and_then(|_| { Self::restore_or_compute_then_maybe_submit() @@ -967,7 +1025,7 @@ impl Pallet { _ => {} } - // after election finalization, clear OCW solution storage. + // After election finalization, clear OCW solution storage. if >::events() .into_iter() .filter_map(|event_record| { @@ -1007,7 +1065,7 @@ impl Pallet { now: T::BlockNumber, ) -> Result { let weight = if need_snapshot { - // if not being followed by a signed phase, then create the snapshots. + // If not being followed by a signed phase, then create the snapshots. debug_assert!(Self::snapshot().is_none()); Self::create_snapshot()? } else { @@ -1037,13 +1095,13 @@ impl Pallet { let (desired_targets, w3) = T::DataProvider::desired_targets().map_err(ElectionError::DataProvider)?; - // defensive-only + // Defensive-only. if targets.len() > target_limit || voters.len() > voter_limit { debug_assert!(false, "Snapshot limit has not been respected."); return Err(ElectionError::DataProvider("Snapshot too big for submission.")); } - // only write snapshot if all existed. + // Only write snapshot if all existed. >::put(SolutionOrSnapshotSize { voters: voters.len() as u32, targets: targets.len() as u32, @@ -1067,10 +1125,10 @@ impl Pallet { ) -> Result, FeasibilityError> { let RawSolution { compact, score, round } = solution; - // first, check round. + // First, check round. ensure!(Self::round() == round, FeasibilityError::InvalidRound); - // winners are not directly encoded in the solution. + // Winners are not directly encoded in the solution. let winners = compact.unique_targets(); let desired_targets = @@ -1081,7 +1139,7 @@ impl Pallet { // upon arrival, thus we would then remove it here. Given overlay it is cheap anyhow ensure!(winners.len() as u32 == desired_targets, FeasibilityError::WrongWinnerCount); - // ensure that the solution's score can pass absolute min-score. + // Ensure that the solution's score can pass absolute min-score. 
let submitted_score = solution.score.clone(); ensure!( Self::minimum_untrusted_score().map_or(true, |min_score| @@ -1090,7 +1148,7 @@ impl Pallet { FeasibilityError::UntrustedScoreTooLow ); - // read the entire snapshot. + // Read the entire snapshot. let RoundSnapshot { voters: snapshot_voters, targets: snapshot_targets } = Self::snapshot().ok_or(FeasibilityError::SnapshotUnavailable)?; @@ -1100,7 +1158,7 @@ impl Pallet { let target_at = helpers::target_at_fn::(&snapshot_targets); let voter_index = helpers::voter_index_fn_usize::(&cache); - // first, make sure that all the winners are sane. + // First, make sure that all the winners are sane. // OPTIMIZATION: we could first build the assignments, and then extract the winners directly // from that, as that would eliminate a little bit of duplicate work. For now, we keep them // separate: First extract winners separately from compact, and then assignments. This is @@ -1119,19 +1177,19 @@ impl Pallet { let _ = assignments .iter() .map(|ref assignment| { - // check that assignment.who is actually a voter (defensive-only). + // Check that assignment.who is actually a voter (defensive-only). // NOTE: while using the index map from `voter_index` is better than a blind linear // search, this *still* has room for optimization. Note that we had the index when // we did `compact -> assignment` and we lost it. Ideal is to keep the index around. - // defensive-only: must exist in the snapshot. + // Defensive-only: must exist in the snapshot. let snapshot_index = voter_index(&assignment.who).ok_or(FeasibilityError::InvalidVoter)?; - // defensive-only: index comes from the snapshot, must exist. + // Defensive-only: index comes from the snapshot, must exist. let (_voter, _stake, targets) = snapshot_voters.get(snapshot_index).ok_or(FeasibilityError::InvalidVoter)?; - // check that all of the targets are valid based on the snapshot. + // Check that all of the targets are valid based on the snapshot. 
if assignment.distribution.iter().any(|(d, _)| !targets.contains(d)) { return Err(FeasibilityError::InvalidVote); } @@ -1163,14 +1221,14 @@ impl Pallet { /// 1. Increment round. /// 2. Change phase to [`Phase::Off`] /// 3. Clear all snapshot data. - fn post_elect() { - // inc round + fn rotate_round() { + // Inc round. >::mutate(|r| *r = *r + 1); - // change phase + // Phase is off now. >::put(Phase::Off); - // kill snapshots + // Kill snapshots. Self::kill_snapshot(); } @@ -1220,10 +1278,18 @@ impl ElectionProvider for Pallet { type DataProvider = T::DataProvider; fn elect() -> Result<(Supports, Weight), Self::Error> { - let outcome_and_weight = Self::do_elect(); - // IMPORTANT: regardless of if election was `Ok` or `Err`, we shall do some cleanup. - Self::post_elect(); - outcome_and_weight + match Self::do_elect() { + Ok((supports, weight)) => { + // All went okay, put sign to be Off, clean snapshot, etc. + Self::rotate_round(); + Ok((supports, weight)) + } + Err(why) => { + log!(error, "Entering emergency mode: {:?}", why); + >::put(Phase::Emergency); + Err(why) + } + } } } @@ -1254,7 +1320,7 @@ mod feasibility_check { assert!(MultiPhase::current_phase().is_signed()); let solution = raw_solution(); - // for whatever reason it might be: + // For whatever reason it might be: >::kill(); assert_noop!( @@ -1307,7 +1373,7 @@ mod feasibility_check { assert_eq!(MultiPhase::snapshot().unwrap().targets.len(), 4); // ----------------------------------------------------^^ valid range is [0..3]. - // swap all votes from 3 to 4. This will ensure that the number of unique winners + // Swap all votes from 3 to 4. This will ensure that the number of unique winners // will still be 4, but one of the indices will be gibberish. Requirement is to make // sure 3 a winner, which we don't do here. solution @@ -1333,7 +1399,7 @@ mod feasibility_check { #[test] fn voter_indices() { - // should be caught in `compact.into_assignment`. + // Should be caught in `compact.into_assignment`. 
ExtBuilder::default().desired_targets(2).build_and_execute(|| { roll_to(::get() - ::get() - ::get()); assert!(MultiPhase::current_phase().is_signed()); @@ -1342,7 +1408,7 @@ mod feasibility_check { assert_eq!(MultiPhase::snapshot().unwrap().voters.len(), 8); // ----------------------------------------------------^^ valid range is [0..7]. - // check that there is a index 7 in votes1, and flip to 8. + // Check that there is an index 7 in votes1, and flip to 8. assert!( solution .compact @@ -1369,7 +1435,7 @@ mod feasibility_check { assert_eq!(MultiPhase::snapshot().unwrap().voters.len(), 8); // ----------------------------------------------------^^ valid range is [0..7]. - // first, check that voter at index 7 (40) actually voted for 3 (40) -- this is self + // First, check that voter at index 7 (40) actually voted for 3 (40) -- this is self // vote. Then, change the vote to 2 (30). assert_eq!( solution @@ -1397,7 +1463,7 @@ mod feasibility_check { let mut solution = raw_solution(); assert_eq!(MultiPhase::snapshot().unwrap().voters.len(), 8); - // simply faff with the score. + // Simply faff with the score. solution.score[0] += 1; assert_noop!( @@ -1457,7 +1523,7 @@ mod tests { assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); assert!(MultiPhase::snapshot().is_some()); - // we close when upstream tells us to elect. + // We close when upstream tells us to elect. roll_to(32); assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); assert!(MultiPhase::snapshot().is_some()); @@ -1540,7 +1606,7 @@ mod tests { roll_to(30); assert!(MultiPhase::current_phase().is_off()); - // this module is now only capable of doing on-chain backup. + // This module is now only capable of doing on-chain backup. assert_ok!(MultiPhase::elect()); assert!(MultiPhase::current_phase().is_off()); @@ -1549,9 +1615,9 @@ mod tests { #[test] fn early_termination() { - // an early termination in the signed phase, with no queued solution. 
+ // An early termination in the signed phase, with no queued solution. ExtBuilder::default().build_and_execute(|| { - // signed phase started at block 15 and will end at 25. + // Signed phase started at block 15 and will end at 25. roll_to(14); assert_eq!(MultiPhase::current_phase(), Phase::Off); @@ -1560,11 +1626,11 @@ mod tests { assert_eq!(MultiPhase::current_phase(), Phase::Signed); assert_eq!(MultiPhase::round(), 1); - // an unexpected call to elect. + // An unexpected call to elect. roll_to(20); MultiPhase::elect().unwrap(); - // we surely can't have any feasible solutions. This will cause an on-chain election. + // We surely can't have any feasible solutions. This will cause an on-chain election. assert_eq!( multi_phase_events(), vec![ @@ -1572,7 +1638,7 @@ mod tests { Event::ElectionFinalized(Some(ElectionCompute::OnChain)) ], ); - // all storage items must be cleared. + // All storage items must be cleared. assert_eq!(MultiPhase::round(), 2); assert!(MultiPhase::snapshot().is_none()); assert!(MultiPhase::snapshot_metadata().is_none()); @@ -1590,7 +1656,7 @@ mod tests { roll_to(25); assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); - // zilch solutions thus far. + // Zilch solutions thus far. let (supports, _) = MultiPhase::elect().unwrap(); assert_eq!( @@ -1609,7 +1675,7 @@ mod tests { roll_to(25); assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); - // zilch solutions thus far. + // Zilch solutions thus far. assert_eq!(MultiPhase::elect().unwrap_err(), ElectionError::NoFallbackConfigured); }) } @@ -1619,15 +1685,15 @@ mod tests { ExtBuilder::default().build_and_execute(|| { Targets::set((0..(TargetIndex::max_value() as AccountId) + 1).collect::>()); - // signed phase failed to open. + // Signed phase failed to open. roll_to(15); assert_eq!(MultiPhase::current_phase(), Phase::Off); - // unsigned phase failed to open. + // Unsigned phase failed to open. 
roll_to(25); assert_eq!(MultiPhase::current_phase(), Phase::Off); - // on-chain backup works though. + // On-chain backup works though. roll_to(29); let (supports, _) = MultiPhase::elect().unwrap(); assert!(supports.len() > 0); @@ -1642,7 +1708,7 @@ mod tests { let (solution, _) = MultiPhase::mine_solution(2).unwrap(); - // default solution has a score of [50, 100, 5000]. + // Default solution has a score of [50, 100, 5000]. assert_eq!(solution.score, [50, 100, 5000]); >::put([49, 0, 0]); diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index 752d94ce19085..fe8a1bd4a3951 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -219,6 +219,7 @@ impl pallet_staking::Config for Test { type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; type NextNewSession = Session; type ElectionProvider = onchain::OnChainSequentialPhragmen; + type GenesisElectionProvider = Self::ElectionProvider; type WeightInfo = (); } diff --git a/frame/offences/benchmarking/src/mock.rs b/frame/offences/benchmarking/src/mock.rs index 7230c1215afc9..b780662b92cd7 100644 --- a/frame/offences/benchmarking/src/mock.rs +++ b/frame/offences/benchmarking/src/mock.rs @@ -178,6 +178,7 @@ impl pallet_staking::Config for Test { type NextNewSession = Session; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; type ElectionProvider = onchain::OnChainSequentialPhragmen; + type GenesisElectionProvider = Self::ElectionProvider; type WeightInfo = (); } diff --git a/frame/session/Cargo.toml b/frame/session/Cargo.toml index 44e1f2f67858b..efe7bc133fb4d 100644 --- a/frame/session/Cargo.toml +++ b/frame/session/Cargo.toml @@ -24,6 +24,7 @@ frame-support = { version = "3.0.0", default-features = false, path = "../suppor frame-system = { version = "3.0.0", default-features = false, path = "../system" } pallet-timestamp = { version = "3.0.0", default-features = false, path = "../timestamp" } sp-trie = { version = "3.0.0", optional = true, 
default-features = false, path = "../../primitives/trie" } +log = { version = "0.4.0", default-features = false } impl-trait-for-tuples = "0.2.1" [dev-dependencies] @@ -44,5 +45,6 @@ std = [ "sp-staking/std", "pallet-timestamp/std", "sp-trie/std", + "log/std", ] try-runtime = ["frame-support/try-runtime"] diff --git a/frame/session/benchmarking/src/mock.rs b/frame/session/benchmarking/src/mock.rs index 87d1242812db2..591e54f067bb5 100644 --- a/frame/session/benchmarking/src/mock.rs +++ b/frame/session/benchmarking/src/mock.rs @@ -183,6 +183,7 @@ impl pallet_staking::Config for Test { type NextNewSession = Session; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; type ElectionProvider = onchain::OnChainSequentialPhragmen; + type GenesisElectionProvider = Self::ElectionProvider; type WeightInfo = (); } diff --git a/frame/session/src/historical/mod.rs b/frame/session/src/historical/mod.rs index 8902ebe551f6c..3cfcbf98bf38c 100644 --- a/frame/session/src/historical/mod.rs +++ b/frame/session/src/historical/mod.rs @@ -124,10 +124,17 @@ impl ValidatorSetWithIdentification for Module { /// Specialization of the crate-level `SessionManager` which returns the set of full identification /// when creating a new session. -pub trait SessionManager: crate::SessionManager { +pub trait SessionManager: + crate::SessionManager +{ /// If there was a validator set change, its returns the set of new validators along with their /// full identifications. fn new_session(new_index: SessionIndex) -> Option>; + fn new_session_genesis( + new_index: SessionIndex, + ) -> Option> { + >::new_session(new_index) + } fn start_session(start_index: SessionIndex); fn end_session(end_index: SessionIndex); } @@ -136,19 +143,20 @@ pub trait SessionManager: crate::SessionManager /// sets the historical trie root of the ending session. 
pub struct NoteHistoricalRoot(sp_std::marker::PhantomData<(T, I)>); -impl crate::SessionManager for NoteHistoricalRoot - where I: SessionManager -{ - fn new_session(new_index: SessionIndex) -> Option> { - +impl> NoteHistoricalRoot { + fn do_new_session(new_index: SessionIndex, is_genesis: bool) -> Option> { StoredRange::mutate(|range| { range.get_or_insert_with(|| (new_index, new_index)).1 = new_index + 1; }); - let new_validators_and_id = >::new_session(new_index); - let new_validators = new_validators_and_id.as_ref().map(|new_validators| { - new_validators.iter().map(|(v, _id)| v.clone()).collect() - }); + let new_validators_and_id = if is_genesis { + >::new_session_genesis(new_index) + } else { + >::new_session(new_index) + }; + let new_validators_opt = new_validators_and_id + .as_ref() + .map(|new_validators| new_validators.iter().map(|(v, _id)| v.clone()).collect()); if let Some(new_validators) = new_validators_and_id { let count = new_validators.len() as ValidatorCount; @@ -166,7 +174,20 @@ impl crate::SessionManager for NoteHistoricalRoot< } } - new_validators + new_validators_opt + } +} + +impl crate::SessionManager for NoteHistoricalRoot +where + I: SessionManager, +{ + fn new_session(new_index: SessionIndex) -> Option> { + Self::do_new_session(new_index, false) + } + + fn new_session_genesis(new_index: SessionIndex) -> Option> { + Self::do_new_session(new_index, true) } fn start_session(start_index: SessionIndex) { diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index 547d29715d9c1..933aff02972f8 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -238,12 +238,19 @@ pub trait SessionManager { /// `new_session(session)` is guaranteed to be called before `end_session(session-1)`. In other /// words, a new session must always be planned before an ongoing one can be finished. fn new_session(new_index: SessionIndex) -> Option>; + /// Same as `new_session`, but it this should only be called at genesis. 
+ /// + /// The session manager might decide to treat this in a different way. Default impl is simply + /// using [`new_session`]. + fn new_session_genesis(new_index: SessionIndex) -> Option> { + Self::new_session(new_index) + } /// End the session. /// /// Because the session pallet can queue validator set the ending session can be lower than the /// last new session index. fn end_session(end_index: SessionIndex); - /// Start the session. + /// Start an already planned session. /// /// The session start to be used for validation. fn start_session(start_index: SessionIndex); @@ -340,13 +347,9 @@ impl SessionHandler for Tuple { pub struct TestSessionHandler; impl SessionHandler for TestSessionHandler { const KEY_TYPE_IDS: &'static [KeyTypeId] = &[sp_runtime::key_types::DUMMY]; - fn on_genesis_session(_: &[(AId, Ks)]) {} - fn on_new_session(_: bool, _: &[(AId, Ks)], _: &[(AId, Ks)]) {} - fn on_before_session_ending() {} - fn on_disabled(_: usize) {} } @@ -451,7 +454,7 @@ decl_storage! { } } - let initial_validators_0 = T::SessionManager::new_session(0) + let initial_validators_0 = T::SessionManager::new_session_genesis(0) .unwrap_or_else(|| { frame_support::print("No initial validator provided by `SessionManager`, use \ session config keys to generate initial validator set."); @@ -459,7 +462,7 @@ decl_storage! { }); assert!(!initial_validators_0.is_empty(), "Empty validator set for session 0 in genesis block!"); - let initial_validators_1 = T::SessionManager::new_session(1) + let initial_validators_1 = T::SessionManager::new_session_genesis(1) .unwrap_or_else(|| initial_validators_0.clone()); assert!(!initial_validators_1.is_empty(), "Empty validator set for session 1 in genesis block!"); @@ -548,7 +551,7 @@ decl_module! { /// Actual cost depends on the number of length of `T::Keys::key_ids()` which is fixed. 
/// - DbReads: `T::ValidatorIdOf`, `NextKeys`, `origin account` /// - DbWrites: `NextKeys`, `origin account` - /// - DbWrites per key id: `KeyOwnder` + /// - DbWrites per key id: `KeyOwner` /// # #[weight = T::WeightInfo::purge_keys()] pub fn purge_keys(origin) { @@ -573,17 +576,17 @@ decl_module! { } impl Module { - /// Move on to next session. Register new validator set and session keys. Changes - /// to the validator set have a session of delay to take effect. This allows for - /// equivocation punishment after a fork. + /// Move on to next session. Register new validator set and session keys. Changes to the + /// validator set have a session of delay to take effect. This allows for equivocation + /// punishment after a fork. pub fn rotate_session() { let session_index = CurrentIndex::get(); + log::trace!(target: "runtime::session", "rotating session {:?}", session_index); let changed = QueuedChanged::get(); // Inform the session handlers that a session is going to end. T::SessionHandler::on_before_session_ending(); - T::SessionManager::end_session(session_index); // Get queued session keys and validators. diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index 800d3379d7e3c..2ad939e5b166c 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -91,7 +91,7 @@ pub fn create_validator_with_nominators( ValidatorCount::::put(1); // Start a new Era - let new_validators = Staking::::new_era(SessionIndex::one()).unwrap(); + let new_validators = Staking::::try_trigger_new_era(SessionIndex::one(), true).unwrap(); assert_eq!(new_validators.len(), 1); assert_eq!(new_validators[0], v_stash, "Our validator was not selected!"); @@ -484,7 +484,8 @@ benchmarks! 
{ )?; let session_index = SessionIndex::one(); }: { - let validators = Staking::::new_era(session_index).ok_or("`new_era` failed")?; + let validators = Staking::::try_trigger_new_era(session_index, true) + .ok_or("`new_era` failed")?; assert!(validators.len() == v as usize); } @@ -500,7 +501,7 @@ benchmarks! { None, )?; // Start a new Era - let new_validators = Staking::::new_era(SessionIndex::one()).unwrap(); + let new_validators = Staking::::try_trigger_new_era(SessionIndex::one(), true).unwrap(); assert!(new_validators.len() == v as usize); let current_era = CurrentEra::::get().unwrap(); diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 30c2a160e9e72..58ab459d1bf28 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -304,7 +304,7 @@ use sp_runtime::{ curve::PiecewiseLinear, traits::{ Convert, Zero, StaticLookup, CheckedSub, Saturating, SaturatedConversion, - AtLeast32BitUnsigned, + AtLeast32BitUnsigned, Bounded, }, }; use sp_staking::{ @@ -542,7 +542,7 @@ impl StakingLedger where if !slash_from_target.is_zero() { *target -= slash_from_target; - // don't leave a dust balance in the staking system. + // Don't leave a dust balance in the staking system. if *target <= minimum_balance { slash_from_target += *target; *value += sp_std::mem::replace(target, Zero::zero()); @@ -560,10 +560,10 @@ impl StakingLedger where slash_out_of(total, &mut chunk.value, &mut value); chunk.value }) - .take_while(|value| value.is_zero()) // take all fully-consumed chunks out. + .take_while(|value| value.is_zero()) // Take all fully-consumed chunks out. .count(); - // kill all drained chunks. + // Kill all drained chunks. let _ = self.unlocking.drain(..i); pre_total.saturating_sub(*total) @@ -719,6 +719,8 @@ pub enum Forcing { /// Not forcing anything - just let whatever happen. NotForcing, /// Force a new era, then reset to `NotForcing` as soon as it is done. 
+ /// Note that this will force to trigger an election until a new era is triggered, if the + /// election failed, the next session end will trigger a new election again, until success. ForceNew, /// Avoid a new era indefinitely. ForceNone, @@ -831,6 +833,13 @@ pub mod pallet { DataProvider = Pallet, >; + /// Something that provides the election functionality at genesis. + type GenesisElectionProvider: frame_election_provider_support::ElectionProvider< + Self::AccountId, + Self::BlockNumber, + DataProvider = Pallet, + >; + /// Maximum number of nominations per nominator. const MAX_NOMINATIONS: u32; @@ -1245,6 +1254,8 @@ pub mod pallet { Withdrawn(T::AccountId, BalanceOf), /// A nominator has been kicked from a validator. \[nominator, stash\] Kicked(T::AccountId, T::AccountId), + /// The election failed. No new era is planned. + StakingElectionFailed, } #[pallet::error] @@ -1376,7 +1387,7 @@ pub mod pallet { Err(Error::::AlreadyPaired)? } - // reject a bond which is considered to be _dust_. + // Reject a bond which is considered to be _dust_. if value < T::Currency::minimum_balance() { Err(Error::::InsufficientValue)? } @@ -1442,7 +1453,7 @@ pub mod pallet { let extra = extra.min(max_additional); ledger.total += extra; ledger.active += extra; - // last check: the new active amount of ledger must be more than ED. + // Last check: the new active amount of ledger must be more than ED. ensure!(ledger.active >= T::Currency::minimum_balance(), Error::::InsufficientValue); Self::deposit_event(Event::::Bonded(stash, extra)); @@ -1560,7 +1571,7 @@ pub mod pallet { // portion to fall below existential deposit + will have no more unlocking chunks // left. We can now safely remove all staking-related information. Self::kill_stash(&stash, num_slashing_spans)?; - // remove the lock. + // Remove the lock. 
T::Currency::remove_lock(STAKING_ID, &stash); // This is worst case scenario, so we use the full weight and return None None @@ -1653,7 +1664,7 @@ pub mod pallet { let nominations = Nominations { targets, - // initial nominations are considered submitted at era 0. See `Nominations` doc + // Initial nominations are considered submitted at era 0. See `Nominations` doc submitted_in: Self::current_era().unwrap_or(0), suppressed: false, }; @@ -1805,6 +1816,12 @@ pub mod pallet { /// /// The dispatch origin must be Root. /// + /// # Warning + /// + /// The election process starts multiple blocks before the end of the era. + /// Thus the election process may be ongoing when this is called. In this case the + /// election will continue until the next era is triggered. + /// /// # /// - No arguments. /// - Weight: O(1) @@ -1822,6 +1839,12 @@ pub mod pallet { /// /// The dispatch origin must be Root. /// + /// # Warning + /// + /// The election process starts multiple blocks before the end of the era. + /// If this is called just before a new era is triggered, the election process may not + /// have enough blocks to get a result. + /// /// # /// - No arguments. /// - Weight: O(1) @@ -1870,10 +1893,10 @@ pub mod pallet { ) -> DispatchResult { ensure_root(origin)?; - // remove all staking-related information. + // Remove all staking-related information. Self::kill_stash(&stash, num_slashing_spans)?; - // remove the lock. + // Remove the lock. T::Currency::remove_lock(STAKING_ID, &stash); Ok(()) } @@ -1882,6 +1905,12 @@ pub mod pallet { /// /// The dispatch origin must be Root. /// + /// # Warning + /// + /// The election process starts multiple blocks before the end of the era. + /// If this is called just before a new era is triggered, the election process may not + /// have enough blocks to get a result. 
+ /// /// # /// - Weight: O(1) /// - Write: ForceEra @@ -1992,7 +2021,7 @@ pub mod pallet { ensure!(!ledger.unlocking.is_empty(), Error::::NoUnlockChunk); let ledger = ledger.rebond(value); - // last check: the new active amount of ledger must be more than ED. + // Last check: the new active amount of ledger must be more than ED. ensure!(ledger.active >= T::Currency::minimum_balance(), Error::::InsufficientValue); Self::deposit_event(Event::::Bonded(ledger.stash.clone(), value)); @@ -2299,10 +2328,9 @@ impl Pallet { } /// Plan a new session potentially trigger a new era. - fn new_session(session_index: SessionIndex) -> Option> { + fn new_session(session_index: SessionIndex, is_genesis: bool) -> Option> { if let Some(current_era) = Self::current_era() { // Initial era has been set. - let current_era_start_session_index = Self::eras_start_session_index(current_era) .unwrap_or_else(|| { frame_support::print("Error: start_session_index must be set for current_era"); @@ -2313,25 +2341,32 @@ impl Pallet { .unwrap_or(0); // Must never happen. match ForceEra::::get() { - // Will set to default again, which is `NotForcing`. - Forcing::ForceNew => ForceEra::::kill(), - // Short circuit to `new_era`. + // Will be set to `NotForcing` again if a new era has been triggered. + Forcing::ForceNew => (), + // Short circuit to `try_trigger_new_era`. Forcing::ForceAlways => (), - // Only go to `new_era` if deadline reached. + // Only go to `try_trigger_new_era` if deadline reached. Forcing::NotForcing if era_length >= T::SessionsPerEra::get() => (), _ => { - // either `Forcing::ForceNone`, + // Either `Forcing::ForceNone`, // or `Forcing::NotForcing if era_length >= T::SessionsPerEra::get()`. return None }, } - // new era. - Self::new_era(session_index) + // New era. 
+ let maybe_new_era_validators = Self::try_trigger_new_era(session_index, is_genesis); + if maybe_new_era_validators.is_some() + && matches!(ForceEra::::get(), Forcing::ForceNew) + { + ForceEra::::put(Forcing::NotForcing); + } + + maybe_new_era_validators } else { - // Set initial era + // Set initial era. log!(debug, "Starting the first era."); - Self::new_era(session_index) + Self::try_trigger_new_era(session_index, is_genesis) } } @@ -2390,12 +2425,12 @@ impl Pallet { if active_era > bonding_duration { let first_kept = active_era - bonding_duration; - // prune out everything that's from before the first-kept index. + // Prune out everything that's from before the first-kept index. let n_to_prune = bonded.iter() .take_while(|&&(era_idx, _)| era_idx < first_kept) .count(); - // kill slashing metadata. + // Kill slashing metadata. for (pruned_era, _) in bonded.drain(..n_to_prune) { slashing::clear_era_metadata::(pruned_era); } @@ -2428,77 +2463,105 @@ impl Pallet { } } - /// Plan a new era. Return the potential new staking set. - fn new_era(start_session_index: SessionIndex) -> Option> { + /// Plan a new era. + /// + /// * Bump the current era storage (which holds the latest planned era). + /// * Store start session index for the new planned era. + /// * Clean old era information. + /// * Store staking information for the new planned era + /// + /// Returns the new validator set. + pub fn trigger_new_era( + start_session_index: SessionIndex, + exposures: Vec<(T::AccountId, Exposure>)>, + ) -> Vec { // Increment or set current era. - let current_era = CurrentEra::::mutate(|s| { + let new_planned_era = CurrentEra::::mutate(|s| { *s = Some(s.map(|s| s + 1).unwrap_or(0)); s.unwrap() }); - ErasStartSessionIndex::::insert(¤t_era, &start_session_index); + ErasStartSessionIndex::::insert(&new_planned_era, &start_session_index); // Clean old era information. 
- if let Some(old_era) = current_era.checked_sub(Self::history_depth() + 1) { + if let Some(old_era) = new_planned_era.checked_sub(Self::history_depth() + 1) { Self::clear_era_information(old_era); } - // Set staking information for new era. - let maybe_new_validators = Self::enact_election(current_era); - - maybe_new_validators + // Set staking information for the new era. + Self::store_stakers_info(exposures, new_planned_era) } - /// Enact and process the election using the `ElectionProvider` type. + /// Potentially plan a new era. + /// + /// Get election result from `T::ElectionProvider`. + /// In case election result has more than [`MinimumValidatorCount`] validator trigger a new era. /// - /// This will also process the election, as noted in [`process_election`]. - fn enact_election(current_era: EraIndex) -> Option> { - T::ElectionProvider::elect() - .map_err(|e| { - log!(warn, "election provider failed due to {:?}", e) + /// In case a new era is planned, the new validator set is returned. + fn try_trigger_new_era(start_session_index: SessionIndex, is_genesis: bool) -> Option> { + let (election_result, weight) = if is_genesis { + T::GenesisElectionProvider::elect().map_err(|e| { + log!(warn, "genesis election provider failed due to {:?}", e); + Self::deposit_event(Event::StakingElectionFailed); }) - .and_then(|(res, weight)| { - >::register_extra_weight_unchecked( - weight, - frame_support::weights::DispatchClass::Mandatory, - ); - Self::process_election(res, current_era) + } else { + T::ElectionProvider::elect().map_err(|e| { + log!(warn, "election provider failed due to {:?}", e); + Self::deposit_event(Event::StakingElectionFailed); }) - .ok() - } + } + .ok()?; - /// Process the output of the election. - /// - /// This ensures enough validators have been elected, converts all supports to exposures and - /// writes them to the associated storage. 
- /// - /// Returns `Err(())` if less than [`MinimumValidatorCount`] validators have been elected, `Ok` - /// otherwise. - pub fn process_election( - flat_supports: frame_election_provider_support::Supports, - current_era: EraIndex, - ) -> Result, ()> { - let exposures = Self::collect_exposures(flat_supports); - let elected_stashes = exposures.iter().cloned().map(|(x, _)| x).collect::>(); + >::register_extra_weight_unchecked( + weight, + frame_support::weights::DispatchClass::Mandatory, + ); + + let exposures = Self::collect_exposures(election_result); - if (elected_stashes.len() as u32) < Self::minimum_validator_count().max(1) { + if (exposures.len() as u32) < Self::minimum_validator_count().max(1) { // Session will panic if we ever return an empty validator set, thus max(1) ^^. - if current_era > 0 { - log!( + match CurrentEra::::get() { + Some(current_era) if current_era > 0 => log!( warn, - "chain does not have enough staking candidates to operate for era {:?} ({} elected, minimum is {})", - current_era, - elected_stashes.len(), + "chain does not have enough staking candidates to operate for era {:?} ({} \ + elected, minimum is {})", + CurrentEra::::get().unwrap_or(0), + exposures.len(), Self::minimum_validator_count(), - ); + ), + None => { + // The initial era is allowed to have no exposures. + // In this case the SessionManager is expected to choose a sensible validator + // set. + // TODO: this should be simplified #8911 + CurrentEra::::put(0); + ErasStartSessionIndex::::insert(&0, &start_session_index); + }, + _ => () } - return Err(()); + + Self::deposit_event(Event::StakingElectionFailed); + return None } + Self::deposit_event(Event::StakingElection); + Some(Self::trigger_new_era(start_session_index, exposures)) + } + + /// Process the output of the election. 
+ /// + /// Store staking information for the new planned era + pub fn store_stakers_info( + exposures: Vec<(T::AccountId, Exposure>)>, + new_planned_era: EraIndex, + ) -> Vec { + let elected_stashes = exposures.iter().cloned().map(|(x, _)| x).collect::>(); + // Populate stakers, exposures, and the snapshot of validator prefs. let mut total_stake: BalanceOf = Zero::zero(); exposures.into_iter().for_each(|(stash, exposure)| { total_stake = total_stake.saturating_add(exposure.total); - >::insert(current_era, &stash, &exposure); + >::insert(new_planned_era, &stash, &exposure); let mut exposure_clipped = exposure; let clipped_max_len = T::MaxNominatorRewardedPerValidator::get() as usize; @@ -2506,31 +2569,28 @@ impl Pallet { exposure_clipped.others.sort_by(|a, b| a.value.cmp(&b.value).reverse()); exposure_clipped.others.truncate(clipped_max_len); } - >::insert(¤t_era, &stash, exposure_clipped); + >::insert(&new_planned_era, &stash, exposure_clipped); }); // Insert current era staking information - >::insert(¤t_era, total_stake); + >::insert(&new_planned_era, total_stake); - // collect the pref of all winners + // Collect the pref of all winners. for stash in &elected_stashes { let pref = Self::validators(stash); - >::insert(¤t_era, stash, pref); + >::insert(&new_planned_era, stash, pref); } - // emit event - Self::deposit_event(Event::::StakingElection); - - if current_era > 0 { + if new_planned_era > 0 { log!( info, "new validator set of size {:?} has been processed for era {:?}", elected_stashes.len(), - current_era, + new_planned_era, ); } - Ok(elected_stashes) + elected_stashes } /// Consume a set of [`Supports`] from [`sp_npos_elections`] and collect them into a @@ -2546,7 +2606,7 @@ impl Pallet { supports .into_iter() .map(|(validator, support)| { - // build `struct exposure` from `support` + // Build `struct exposure` from `support`. 
let mut others = Vec::with_capacity(support.voters.len()); let mut own: BalanceOf = Zero::zero(); let mut total: BalanceOf = Zero::zero(); @@ -2681,12 +2741,12 @@ impl Pallet { let mut all_voters = Vec::new(); for (validator, _) in >::iter() { - // append self vote + // Append self vote. let self_vote = (validator.clone(), weight_of(&validator), vec![validator.clone()]); all_voters.push(self_vote); } - // collect all slashing spans into a BTreeMap for further queries. + // Collect all slashing spans into a BTreeMap for further queries. let slashing_spans = >::iter().collect::>(); for (nominator, nominations) in >::iter() { @@ -2765,18 +2825,23 @@ impl frame_election_provider_support::ElectionDataProvider::get() { + Forcing::ForceNone => Bounded::max_value(), + Forcing::ForceNew | Forcing::ForceAlways => Zero::zero(), + Forcing::NotForcing if era_length >= T::SessionsPerEra::get() => Zero::zero(), + Forcing::NotForcing => T::SessionsPerEra::get() + .saturating_sub(era_length) + // One session is computed in this_session_end. 
+ .saturating_sub(1) + .into(), + }; now.saturating_add( until_this_session_end.saturating_add(sessions_left.saturating_mul(session_length)), @@ -2841,16 +2906,21 @@ impl frame_election_provider_support::ElectionDataProvider pallet_session::SessionManager for Pallet { fn new_session(new_index: SessionIndex) -> Option> { - log!(trace, "planning new_session({})", new_index); + log!(trace, "planning new session {}", new_index); + CurrentPlannedSession::::put(new_index); + Self::new_session(new_index, false) + } + fn new_session_genesis(new_index: SessionIndex) -> Option> { + log!(trace, "planning new session {} at genesis", new_index); CurrentPlannedSession::::put(new_index); - Self::new_session(new_index) + Self::new_session(new_index, true) } fn start_session(start_index: SessionIndex) { - log!(trace, "starting start_session({})", start_index); + log!(trace, "starting session {}", start_index); Self::start_session(start_index) } fn end_session(end_index: SessionIndex) { - log!(trace, "ending end_session({})", end_index); + log!(trace, "ending session {}", end_index); Self::end_session(end_index) } } @@ -2872,6 +2942,20 @@ impl historical::SessionManager Option>)>> { + >::new_session_genesis(new_index).map(|validators| { + let current_era = Self::current_era() + // Must be some as a new era has been created. + .unwrap_or(0); + + validators.into_iter().map(|v| { + let exposure = Self::eras_stakers(current_era, &v); + (v, exposure) + }).collect() + }) + } fn start_session(start_index: SessionIndex) { >::start_session(start_index) } @@ -2960,7 +3044,7 @@ where let active_era = Self::active_era(); add_db_reads_writes(1, 0); if active_era.is_none() { - // this offence need not be re-submitted. + // This offence need not be re-submitted. 
return consumed_weight } active_era.expect("value checked not to be `None`; qed").index @@ -2974,7 +3058,7 @@ where let window_start = active_era.saturating_sub(T::BondingDuration::get()); - // fast path for active-era report - most likely. + // Fast path for active-era report - most likely. // `slash_session` cannot be in a future active era. It must be in `active_era` or before. let slash_era = if slash_session >= active_era_start_session_index { active_era @@ -2982,10 +3066,10 @@ where let eras = BondedEras::::get(); add_db_reads_writes(1, 0); - // reverse because it's more likely to find reports from recent eras. + // Reverse because it's more likely to find reports from recent eras. match eras.iter().rev().filter(|&&(_, ref sesh)| sesh <= &slash_session).next() { Some(&(ref slash_era, _)) => *slash_era, - // before bonding period. defensive - should be filtered out. + // Before bonding period. defensive - should be filtered out. None => return consumed_weight, } }; @@ -3031,7 +3115,7 @@ where } unapplied.reporters = details.reporters.clone(); if slash_defer_duration == 0 { - // apply right away. + // Apply right away. slashing::apply_slash::(unapplied); { let slash_cost = (6, 5); @@ -3042,7 +3126,7 @@ where ); } } else { - // defer to end of some `slash_defer_duration` from now. + // Defer to end of some `slash_defer_duration` from now. ::UnappliedSlashes::mutate( active_era, move |for_later| for_later.push(unapplied), @@ -3071,7 +3155,7 @@ where O: Offence, { fn report_offence(reporters: Vec, offence: O) -> Result<(), OffenceError> { - // disallow any slashing from before the current bonding period. + // Disallow any slashing from before the current bonding period. 
let offence_session = offence.session_index(); let bonded_eras = BondedEras::::get(); diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index 211cc025300e0..f58cdf0d2350f 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -260,6 +260,7 @@ impl Config for Test { type NextNewSession = Session; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; type ElectionProvider = onchain::OnChainSequentialPhragmen; + type GenesisElectionProvider = Self::ElectionProvider; type WeightInfo = (); } diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 4473e89585002..ee8f78769e70a 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -440,13 +440,26 @@ fn no_candidate_emergency_condition() { let res = Staking::chill(Origin::signed(10)); assert_ok!(res); - // trigger era - mock::start_active_era(1); + let current_era = CurrentEra::::get(); + + // try trigger new era + mock::run_to_block(20); + assert_eq!( + *staking_events().last().unwrap(), + Event::StakingElectionFailed, + ); + // No new era is created + assert_eq!(current_era, CurrentEra::::get()); + + // Go to far further session to see if validator have changed + mock::run_to_block(100); - // Previous ones are elected. chill is invalidates. TODO: #2494 + // Previous ones are elected. chill is not effective in active era (as era hasn't changed) assert_eq_uvec!(validator_controllers(), vec![10, 20, 30, 40]); - // Though the validator preferences has been removed. - assert!(Staking::validators(11) != prefs); + // The chill is still pending. + assert!(!::Validators::contains_key(11)); + // No new era is created. 
+ assert_eq!(current_era, CurrentEra::::get()); }); } @@ -3970,6 +3983,34 @@ mod election_data_provider { *staking_events().last().unwrap(), Event::StakingElection ); + + Staking::force_no_eras(Origin::root()).unwrap(); + assert_eq!(Staking::next_election_prediction(System::block_number()), u64::max_value()); + + Staking::force_new_era_always(Origin::root()).unwrap(); + assert_eq!(Staking::next_election_prediction(System::block_number()), 45 + 5); + + Staking::force_new_era(Origin::root()).unwrap(); + assert_eq!(Staking::next_election_prediction(System::block_number()), 45 + 5); + + // Do a fail election + MinimumValidatorCount::::put(1000); + run_to_block(50); + // Election: failed, next session is a new election + assert_eq!(Staking::next_election_prediction(System::block_number()), 50 + 5); + // The new era is still forced until a new era is planned. + assert_eq!(ForceEra::::get(), Forcing::ForceNew); + + MinimumValidatorCount::::put(2); + run_to_block(55); + assert_eq!(Staking::next_election_prediction(System::block_number()), 55 + 25); + assert_eq!(staking_events().len(), 6); + assert_eq!( + *staking_events().last().unwrap(), + Event::StakingElection + ); + // The new era has been planned, forcing is changed from `ForceNew` to `NotForcing`. + assert_eq!(ForceEra::::get(), Forcing::NotForcing); }) } } From 3bb42b9aaba65e956aea7aaef2c2fb3468fa3aaf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 17 Jun 2021 09:27:53 +0200 Subject: [PATCH 50/61] Aura: Skip initialize block & remove cache (#9132) This instructs the Aura runtime api to skip initialize block, when requesting the authorities. This is important, as we don't want to use the new authorities that should be used from the next block on. Besides that, it removes the caching stuff. The cache is not available on full nodes anyway. In the future we should store the authorities probably in the aux store. 
--- client/consensus/aura/src/import_queue.rs | 39 ----------------------- client/consensus/aura/src/lib.rs | 13 +++----- primitives/consensus/aura/src/lib.rs | 1 + 3 files changed, 5 insertions(+), 48 deletions(-) diff --git a/client/consensus/aura/src/import_queue.rs b/client/consensus/aura/src/import_queue.rs index 8034fd08a7eb6..c3faa5382686e 100644 --- a/client/consensus/aura/src/import_queue.rs +++ b/client/consensus/aura/src/import_queue.rs @@ -317,43 +317,6 @@ impl Verifier for AuraVerifier w } } -fn initialize_authorities_cache(client: &C) -> Result<(), ConsensusError> where - A: Codec + Debug, - B: BlockT, - C: ProvideRuntimeApi + BlockOf + ProvideCache + UsageProvider, - C::Api: AuraApi, -{ - // no cache => no initialization - let cache = match client.cache() { - Some(cache) => cache, - None => return Ok(()), - }; - - let best_hash = client.usage_info().chain.best_hash; - - // check if we already have initialized the cache - let map_err = |error| sp_consensus::Error::from(sp_consensus::Error::ClientImport( - format!( - "Error initializing authorities cache: {}", - error, - ))); - - let block_id = BlockId::hash(best_hash); - let authorities: Option> = cache - .get_at(&well_known_cache_keys::AUTHORITIES, &block_id) - .unwrap_or(None) - .and_then(|(_, _, v)| Decode::decode(&mut &v[..]).ok()); - if authorities.is_some() { - return Ok(()); - } - - let authorities = crate::authorities(client, &block_id)?; - cache.initialize(&well_known_cache_keys::AUTHORITIES, authorities.encode()) - .map_err(map_err)?; - - Ok(()) -} - /// Should we check for equivocation of a block author? 
#[derive(Debug, Clone, Copy)] pub enum CheckForEquivocation { @@ -438,8 +401,6 @@ pub fn import_queue<'a, P, Block, I, C, S, CAW, CIDP>( CIDP: CreateInherentDataProviders + Sync + Send + 'static, CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { - initialize_authorities_cache(&*client)?; - let verifier = build_verifier::( BuildVerifierParams { client, diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 702e4dc0bf1bd..d0b0cefe8ddca 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -45,7 +45,7 @@ use sp_consensus::{ BlockOrigin, Error as ConsensusError, SelectChain, }; use sc_client_api::{backend::AuxStore, BlockOf, UsageProvider}; -use sp_blockchain::{Result as CResult, well_known_cache_keys, ProvideCache, HeaderBackend}; +use sp_blockchain::{Result as CResult, ProvideCache, HeaderBackend}; use sp_core::crypto::Public; use sp_application_crypto::{AppKey, AppPublic}; use sp_runtime::{generic::BlockId, traits::NumberFor}; @@ -546,14 +546,9 @@ fn authorities(client: &C, at: &BlockId) -> Result, Consensus C: ProvideRuntimeApi + BlockOf + ProvideCache, C::Api: AuraApi, { - client - .cache() - .and_then(|cache| cache - .get_at(&well_known_cache_keys::AUTHORITIES, at) - .unwrap_or(None) - .and_then(|(_, _, v)| Decode::decode(&mut &v[..]).ok()) - ) - .or_else(|| AuraApi::authorities(&*client.runtime_api(), at).ok()) + client.runtime_api() + .authorities(at) + .ok() .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet.into()) } diff --git a/primitives/consensus/aura/src/lib.rs b/primitives/consensus/aura/src/lib.rs index a28e681fda27f..ef888a2ab855b 100644 --- a/primitives/consensus/aura/src/lib.rs +++ b/primitives/consensus/aura/src/lib.rs @@ -90,6 +90,7 @@ sp_api::decl_runtime_apis! { fn slot_duration() -> SlotDuration; // Return the current set of authorities. 
+ #[skip_initialize_block] fn authorities() -> Vec; } } From 159a5aade9a6104fa70779a38a326b2cddc69da3 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Thu, 17 Jun 2021 18:01:27 +0200 Subject: [PATCH 51/61] double the allocator limit (#9102) * double the allocator limit * 32 MiB should be enough for everybody. * Update doc Co-authored-by: Sergei Shulepov --- primitives/allocator/src/freeing_bump.rs | 33 +++++++++++++++++++----- 1 file changed, 26 insertions(+), 7 deletions(-) diff --git a/primitives/allocator/src/freeing_bump.rs b/primitives/allocator/src/freeing_bump.rs index 14746c8784f8d..64ba136f9a354 100644 --- a/primitives/allocator/src/freeing_bump.rs +++ b/primitives/allocator/src/freeing_bump.rs @@ -36,7 +36,7 @@ //! //! For implementing freeing we maintain a linked lists for each order. The maximum supported //! allocation size is capped, therefore the number of orders and thus the linked lists is as well -//! limited. Currently, the maximum size of an allocation is 16 MiB. +//! limited. Currently, the maximum size of an allocation is 32 MiB. //! //! When the allocator serves an allocation request it first checks the linked list for the respective //! order. If it doesn't have any free chunks, the allocator requests memory from the bump allocator. @@ -44,6 +44,24 @@ //! //! Upon deallocation we get the order of the allocation from its header and then add that //! allocation to the linked list for the respective order. +//! +//! # Caveats +//! +//! This is a fast allocator but it is also dumb. There are specifically two main shortcomings +//! that the user should keep in mind: +//! +//! - Once the bump allocator space is exhausted, there is no way to reclaim the memory. This means +//! that it's possible to end up in a situation where there are no live allocations yet a new +//! allocation will fail. +//! +//! Let's look into an example. Given a heap of 32 MiB. The user makes a 32 MiB allocation that we +//! 
call `X` . Now the heap is full. Then user deallocates `X`. Since all the space in the bump +//! allocator was consumed by the 32 MiB allocation, allocations of all sizes except 32 MiB will +//! fail. +//! +//! - Sizes of allocations are rounded up to the nearest order. That is, an allocation of 2,00001 MiB +//! will be put into the bucket of 4 MiB. Therefore, typically more than half of the space in allocation +//! will be wasted. This is more pronounced with larger allocation sizes. use crate::Error; use sp_std::{mem, convert::{TryFrom, TryInto}, ops::{Range, Index, IndexMut}}; @@ -78,15 +96,15 @@ macro_rules! trace { // The minimum possible allocation size is chosen to be 8 bytes because in that case we would have // easier time to provide the guaranteed alignment of 8. // -// The maximum possible allocation size was chosen rather arbitrary. 16 MiB should be enough for +// The maximum possible allocation size was chosen rather arbitrary. 32 MiB should be enough for // everybody. // // N_ORDERS - represents the number of orders supported. // // This number corresponds to the number of powers between the minimum possible allocation and -// maximum possible allocation, or: 2^3...2^24 (both ends inclusive, hence 22). -const N_ORDERS: usize = 22; -const MAX_POSSIBLE_ALLOCATION: u32 = 16777216; // 2^24 bytes, 16 MiB +// maximum possible allocation, or: 2^3...2^25 (both ends inclusive, hence 23). +const N_ORDERS: usize = 23; +const MAX_POSSIBLE_ALLOCATION: u32 = 33554432; // 2^25 bytes, 32 MiB const MIN_POSSIBLE_ALLOCATION: u32 = 8; // 2^3 bytes, 8 bytes /// The exponent for the power of two sized block adjusted to the minimum size. @@ -100,6 +118,7 @@ const MIN_POSSIBLE_ALLOCATION: u32 = 8; // 2^3 bytes, 8 bytes /// 64 | 3 /// ... /// 16777216 | 21 +/// 33554432 | 22 /// /// and so on. #[derive(Copy, Clone, PartialEq, Eq, Debug)] @@ -329,7 +348,7 @@ impl FreeingBumpHeapAllocator { } /// Gets requested number of bytes to allocate and returns a pointer. 
- /// The maximum size which can be allocated at once is 16 MiB. + /// The maximum size which can be allocated at once is 32 MiB. /// There is no minimum size, but whatever size is passed into /// this function is rounded to the next power of two. If the requested /// size is below 8 bytes it will be rounded up to 8 bytes. @@ -813,7 +832,7 @@ mod tests { #[test] fn should_get_max_item_size_from_index() { // given - let raw_order = 21; + let raw_order = 22; // when let item_size = Order::from_raw(raw_order).unwrap().size(); From 8fb89f411469adba41f846fc268f68726d45788c Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Wed, 16 Jun 2021 05:57:14 +0100 Subject: [PATCH 52/61] Merge 8920 --- frame/staking/src/benchmarking.rs | 53 +++- frame/staking/src/lib.rs | 295 ++++++++++++++++++--- frame/staking/src/mock.rs | 25 +- frame/staking/src/testing_utils.rs | 6 +- frame/staking/src/tests.rs | 397 +++++++++++++++++++---------- frame/staking/src/weights.rs | 284 +++++++++++---------- 6 files changed, 752 insertions(+), 308 deletions(-) diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index 2ad939e5b166c..8adf797abe9e9 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -30,6 +30,7 @@ pub use frame_benchmarking::{ const SEED: u32 = 0; const MAX_SPANS: u32 = 100; const MAX_VALIDATORS: u32 = 1000; +const MAX_NOMINATORS: u32 = 1000; const MAX_SLASHES: u32 = 1000; // Add slashing spans to a user account. Not relevant for actual use, only to benchmark @@ -463,12 +464,18 @@ benchmarks! { reap_stash { let s in 1 .. 
MAX_SPANS; let (stash, controller) = create_stash_controller::(0, 100, Default::default())?; + Staking::::validate(RawOrigin::Signed(controller.clone()).into(), ValidatorPrefs::default())?; add_slashing_spans::(&stash, s); T::Currency::make_free_balance_be(&stash, T::Currency::minimum_balance()); whitelist_account!(controller); + + assert!(Bonded::::contains_key(&stash)); + assert!(Validators::::contains_key(&stash)); + }: _(RawOrigin::Signed(controller), stash.clone(), s) verify { assert!(!Bonded::::contains_key(&stash)); + assert!(!Validators::::contains_key(&stash)); } new_era { @@ -563,9 +570,9 @@ benchmarks! { get_npos_voters { // number of validator intention. - let v in 200 .. 400; + let v in (MAX_VALIDATORS / 2) .. MAX_VALIDATORS; // number of nominator intention. - let n in 200 .. 400; + let n in (MAX_NOMINATORS / 2) .. MAX_NOMINATORS; // total number of slashing spans. Assigned to validators randomly. let s in 1 .. 20; @@ -584,15 +591,42 @@ benchmarks! { get_npos_targets { // number of validator intention. - let v in 200 .. 400; + let v in (MAX_VALIDATORS / 2) .. MAX_VALIDATORS; // number of nominator intention. - let n = 500; + let n = MAX_NOMINATORS; let _ = create_validators_with_nominators_for_era::(v, n, T::MAX_NOMINATIONS as usize, false, None)?; }: { let targets = >::get_npos_targets(); assert_eq!(targets.len() as u32, v); } + + update_staking_limits { + // This function always does the same thing... just write to 4 storage items. 
+ }: _( + RawOrigin::Root, + BalanceOf::::max_value(), + BalanceOf::::max_value(), + Some(u32::max_value()), + Some(u32::max_value()) + ) verify { + assert_eq!(MinNominatorBond::::get(), BalanceOf::::max_value()); + assert_eq!(MinValidatorBond::::get(), BalanceOf::::max_value()); + assert_eq!(MaxNominatorsCount::::get(), Some(u32::max_value())); + assert_eq!(MaxValidatorsCount::::get(), Some(u32::max_value())); + } + + chill_other { + let (_, controller) = create_stash_controller::(USER_SEED, 100, Default::default())?; + Staking::::validate(RawOrigin::Signed(controller.clone()).into(), ValidatorPrefs::default())?; + Staking::::update_staking_limits( + RawOrigin::Root.into(), BalanceOf::::max_value(), BalanceOf::::max_value(), None, None, + )?; + let caller = whitelisted_caller(); + }: _(RawOrigin::Signed(caller), controller.clone()) + verify { + assert!(!Validators::::contains_key(controller)); + } } #[cfg(test)] @@ -603,7 +637,7 @@ mod tests { #[test] fn create_validators_with_nominators_for_era_works() { - ExtBuilder::default().has_stakers(true).build().execute_with(|| { + ExtBuilder::default().has_stakers(true).build_and_execute(|| { let v = 10; let n = 100; @@ -625,7 +659,7 @@ mod tests { #[test] fn create_validator_with_nominators_works() { - ExtBuilder::default().has_stakers(true).build().execute_with(|| { + ExtBuilder::default().has_stakers(true).build_and_execute(|| { let n = 10; let (validator_stash, nominators) = create_validator_with_nominators::( @@ -649,7 +683,7 @@ mod tests { #[test] fn add_slashing_spans_works() { - ExtBuilder::default().has_stakers(true).build().execute_with(|| { + ExtBuilder::default().has_stakers(true).build_and_execute(|| { let n = 10; let (validator_stash, _nominators) = create_validator_with_nominators::( @@ -680,7 +714,7 @@ mod tests { #[test] fn test_payout_all() { - ExtBuilder::default().has_stakers(true).build().execute_with(|| { + ExtBuilder::default().has_stakers(true).build_and_execute(|| { let v = 10; let n = 100; @@ 
-700,6 +734,7 @@ mod tests { impl_benchmark_test_suite!( Staking, - crate::mock::ExtBuilder::default().has_stakers(true).build(), + crate::mock::ExtBuilder::default().has_stakers(true), crate::mock::Test, + exec_name = build_and_execute ); diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 58ab459d1bf28..aa66efc3cce0c 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -745,17 +745,46 @@ enum Releases { V4_0_0, V5_0_0, // blockable validators. V6_0_0, // removal of all storage associated with offchain phragmen. + V7_0_0, // keep track of number of nominators / validators in map } impl Default for Releases { fn default() -> Self { - Releases::V6_0_0 + Releases::V7_0_0 } } pub mod migrations { use super::*; + pub mod v7 { + use super::*; + + pub fn pre_migrate() -> Result<(), &'static str> { + assert!(CurrentValidatorsCount::::get().is_zero(), "CurrentValidatorsCount already set."); + assert!(CurrentNominatorsCount::::get().is_zero(), "CurrentNominatorsCount already set."); + assert!(StorageVersion::::get() == Releases::V6_0_0); + Ok(()) + } + + pub fn migrate() -> Weight { + log!(info, "Migrating staking to Releases::V7_0_0"); + let validator_count = Validators::::iter().count() as u32; + let nominator_count = Nominators::::iter().count() as u32; + + CurrentValidatorsCount::::put(validator_count); + CurrentNominatorsCount::::put(nominator_count); + + StorageVersion::::put(Releases::V7_0_0); + log!(info, "Completed staking migration to Releases::V7_0_0"); + + T::DbWeight::get().reads_writes( + validator_count.saturating_add(nominator_count).into(), + 2, + ) + } + } + pub mod v6 { use super::*; use frame_support::{traits::Get, weights::Weight, generate_storage_alias}; @@ -940,6 +969,14 @@ pub mod pallet { #[pallet::getter(fn bonded)] pub type Bonded = StorageMap<_, Twox64Concat, T::AccountId, T::AccountId>; + /// The minimum active bond to become and maintain the role of a nominator. 
+ #[pallet::storage] + pub type MinNominatorBond = StorageValue<_, BalanceOf, ValueQuery>; + + /// The minimum active bond to become and maintain the role of a validator. + #[pallet::storage] + pub type MinValidatorBond = StorageValue<_, BalanceOf, ValueQuery>; + /// Map from all (unlocked) "controller" accounts to the info regarding the staking. #[pallet::storage] #[pallet::getter(fn ledger)] @@ -960,15 +997,39 @@ pub mod pallet { >; /// The map from (wannabe) validator stash key to the preferences of that validator. + /// + /// When updating this storage item, you must also update the `CurrentValidatorsCount`. #[pallet::storage] #[pallet::getter(fn validators)] pub type Validators = StorageMap<_, Twox64Concat, T::AccountId, ValidatorPrefs, ValueQuery>; + /// A tracker to keep count of the number of items in the `Validators` map. + #[pallet::storage] + pub type CurrentValidatorsCount = StorageValue<_, u32, ValueQuery>; + + /// The maximum validator count before we stop allowing new validators to join. + /// + /// When this value is not set, no limits are enforced. + #[pallet::storage] + pub type MaxValidatorsCount = StorageValue<_, u32, OptionQuery>; + /// The map from nominator stash key to the set of stash keys of all validators to nominate. + /// + /// When updating this storage item, you must also update the `CurrentNominatorsCount`. #[pallet::storage] #[pallet::getter(fn nominators)] pub type Nominators = StorageMap<_, Twox64Concat, T::AccountId, Nominations>; + /// A tracker to keep count of the number of items in the `Nominators` map. + #[pallet::storage] + pub type CurrentNominatorsCount = StorageValue<_, u32, ValueQuery>; + + /// The maximum nominator count before we stop allowing new validators to join. + /// + /// When this value is not set, no limits are enforced. + #[pallet::storage] + pub type MaxNominatorsCount = StorageValue<_, u32, OptionQuery>; + /// The current era index. 
/// /// This is the latest planned era, depending on how the Session pallet queues the validator @@ -1165,6 +1226,8 @@ pub mod pallet { pub slash_reward_fraction: Perbill, pub canceled_payout: BalanceOf, pub stakers: Vec<(T::AccountId, T::AccountId, BalanceOf, StakerStatus)>, + pub min_nominator_bond: BalanceOf, + pub min_validator_bond: BalanceOf, } #[cfg(feature = "std")] @@ -1179,6 +1242,8 @@ pub mod pallet { slash_reward_fraction: Default::default(), canceled_payout: Default::default(), stakers: Default::default(), + min_nominator_bond: Default::default(), + min_validator_bond: Default::default(), } } } @@ -1194,6 +1259,8 @@ pub mod pallet { CanceledSlashPayout::::put(self.canceled_payout); SlashRewardFraction::::put(self.slash_reward_fraction); StorageVersion::::put(Releases::V6_0_0); + MinNominatorBond::::put(self.min_nominator_bond); + MinValidatorBond::::put(self.min_validator_bond); for &(ref stash, ref controller, balance, ref status) in &self.stakers { assert!( @@ -1274,8 +1341,8 @@ pub mod pallet { DuplicateIndex, /// Slash record index out of bounds. InvalidSlashIndex, - /// Can not bond with value less than minimum balance. - InsufficientValue, + /// Can not bond with value less than minimum required. + InsufficientBond, /// Can not schedule more unlock chunks. NoMoreChunks, /// Can not rebond without unlocking chunks. @@ -1300,18 +1367,35 @@ pub mod pallet { TooManyTargets, /// A nomination target was supplied that was blocked or otherwise not a validator. BadTarget, + /// The user has enough bond and thus cannot be chilled forcefully by an external person. + CannotChillOther, + /// There are too many nominators in the system. Governance needs to adjust the staking settings + /// to keep things safe for the runtime. + TooManyNominators, + /// There are too many validators in the system. Governance needs to adjust the staking settings + /// to keep things safe for the runtime. 
+ TooManyValidators, } #[pallet::hooks] impl Hooks> for Pallet { fn on_runtime_upgrade() -> Weight { - if StorageVersion::::get() == Releases::V5_0_0 { - migrations::v6::migrate::() + if StorageVersion::::get() == Releases::V6_0_0 { + migrations::v7::migrate::() } else { T::DbWeight::get().reads(1) } } + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<(), &'static str> { + if StorageVersion::::get() == Releases::V6_0_0 { + migrations::v7::pre_migrate::() + } else { + Ok(()) + } + } + fn on_initialize(_now: BlockNumberFor) -> Weight { // just return the weight of the on_finalize. T::DbWeight::get().reads(1) @@ -1389,7 +1473,7 @@ pub mod pallet { // Reject a bond which is considered to be _dust_. if value < T::Currency::minimum_balance() { - Err(Error::::InsufficientValue)? + Err(Error::::InsufficientBond)? } frame_system::Pallet::::inc_consumers(&stash).map_err(|_| Error::::BadState)?; @@ -1454,7 +1538,7 @@ pub mod pallet { ledger.total += extra; ledger.active += extra; // Last check: the new active amount of ledger must be more than ED. - ensure!(ledger.active >= T::Currency::minimum_balance(), Error::::InsufficientValue); + ensure!(ledger.active >= T::Currency::minimum_balance(), Error::::InsufficientBond); Self::deposit_event(Event::::Bonded(stash, extra)); Self::update_ledger(&controller, &ledger); @@ -1473,6 +1557,9 @@ pub mod pallet { /// can co-exists at the same time. In that case, [`Call::withdraw_unbonded`] need /// to be called first to remove some of the chunks (if possible). /// + /// If a user encounters the `InsufficientBond` error when calling this extrinsic, + /// they should call `chill` first in order to free up their bonded funds. + /// /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. /// And, it can be only called when [`EraElectionStatus`] is `Closed`. 
/// @@ -1514,6 +1601,18 @@ pub mod pallet { ledger.active = Zero::zero(); } + let min_active_bond = if Nominators::::contains_key(&ledger.stash) { + MinNominatorBond::::get() + } else if Validators::::contains_key(&ledger.stash) { + MinValidatorBond::::get() + } else { + Zero::zero() + }; + + // Make sure that the user maintains enough active bond for their role. + // If a user runs into this error, they should chill first. + ensure!(ledger.active >= min_active_bond, Error::::InsufficientBond); + // Note: in case there is no current era it is fine to bond one era more. let era = Self::current_era().unwrap_or(0) + T::BondingDuration::get(); ledger.unlocking.push(UnlockChunk { value, era }); @@ -1614,10 +1713,19 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::validate())] pub fn validate(origin: OriginFor, prefs: ValidatorPrefs) -> DispatchResult { let controller = ensure_signed(origin)?; + + // If this error is reached, we need to adjust the `MinValidatorBond` and start calling `chill_other`. + // Until then, we explicitly block new validators to protect the runtime. + if let Some(max_validators) = MaxValidatorsCount::::get() { + ensure!(CurrentValidatorsCount::::get() < max_validators, Error::::TooManyValidators); + } + let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + ensure!(ledger.active >= MinValidatorBond::::get(), Error::::InsufficientBond); + let stash = &ledger.stash; - >::remove(stash); - >::insert(stash, prefs); + Self::do_remove_nominator(stash); + Self::do_add_validator(stash, prefs); Ok(()) } @@ -1646,7 +1754,16 @@ pub mod pallet { targets: Vec<::Source>, ) -> DispatchResult { let controller = ensure_signed(origin)?; + + // If this error is reached, we need to adjust the `MinNominatorBond` and start calling `chill_other`. + // Until then, we explicitly block new nominators to protect the runtime. 
+ if let Some(max_nominators) = MaxNominatorsCount::::get() { + ensure!(CurrentNominatorsCount::::get() < max_nominators, Error::::TooManyNominators); + } + let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + ensure!(ledger.active >= MinNominatorBond::::get(), Error::::InsufficientBond); + let stash = &ledger.stash; ensure!(!targets.is_empty(), Error::::EmptyTargets); ensure!(targets.len() <= T::MAX_NOMINATIONS as usize, Error::::TooManyTargets); @@ -1669,8 +1786,8 @@ pub mod pallet { suppressed: false, }; - >::remove(stash); - >::insert(stash, &nominations); + Self::do_remove_validator(stash); + Self::do_add_nominator(stash, nominations); Ok(()) } @@ -2022,7 +2139,7 @@ pub mod pallet { let ledger = ledger.rebond(value); // Last check: the new active amount of ledger must be more than ED. - ensure!(ledger.active >= T::Currency::minimum_balance(), Error::::InsufficientValue); + ensure!(ledger.active >= T::Currency::minimum_balance(), Error::::InsufficientBond); Self::deposit_event(Event::::Bonded(ledger.stash.clone(), value)); Self::update_ledger(&controller, &ledger); @@ -2135,6 +2252,80 @@ pub mod pallet { Ok(()) } + + /// Update the various staking limits this pallet. + /// + /// * `min_nominator_bond`: The minimum active bond needed to be a nominator. + /// * `min_validator_bond`: The minimum active bond needed to be a validator. + /// * `max_nominator_count`: The max number of users who can be a nominator at once. + /// When set to `None`, no limit is enforced. + /// * `max_validator_count`: The max number of users who can be a validator at once. + /// When set to `None`, no limit is enforced. + /// + /// Origin must be Root to call this function. + /// + /// NOTE: Existing nominators and validators will not be affected by this update. + /// to kick people under the new limits, `chill_other` should be called. 
+ #[pallet::weight(T::WeightInfo::update_staking_limits())] + pub fn update_staking_limits( + origin: OriginFor, + min_nominator_bond: BalanceOf, + min_validator_bond: BalanceOf, + max_nominator_count: Option, + max_validator_count: Option, + ) -> DispatchResult { + ensure_root(origin)?; + MinNominatorBond::::set(min_nominator_bond); + MinValidatorBond::::set(min_validator_bond); + MaxNominatorsCount::::set(max_nominator_count); + MaxValidatorsCount::::set(max_validator_count); + Ok(()) + } + + /// Declare a `controller` as having no desire to either validator or nominate. + /// + /// Effects will be felt at the beginning of the next era. + /// + /// The dispatch origin for this call must be _Signed_, but can be called by anyone. + /// + /// If the caller is the same as the controller being targeted, then no further checks + /// are enforced. However, this call can also be made by an third party user who witnesses + /// that this controller does not satisfy the minimum bond requirements to be in their role. + /// + /// This can be helpful if bond requirements are updated, and we need to remove old users + /// who do not satisfy these requirements. + /// + // TODO: Maybe we can deprecate `chill` in the future. + // https://github.com/paritytech/substrate/issues/9111 + #[pallet::weight(T::WeightInfo::chill_other())] + pub fn chill_other( + origin: OriginFor, + controller: T::AccountId, + ) -> DispatchResult { + // Anyone can call this function. + let caller = ensure_signed(origin)?; + let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + let stash = ledger.stash; + + // If the caller is not the controller, we want to check that the minimum bond + // requirements are not satisfied, and thus we have reason to chill this user. + // + // Otherwise, if caller is the same as the controller, this is just like `chill`. 
+ if caller != controller { + let min_active_bond = if Nominators::::contains_key(&stash) { + MinNominatorBond::::get() + } else if Validators::::contains_key(&stash) { + MinValidatorBond::::get() + } else { + Zero::zero() + }; + + ensure!(ledger.active < min_active_bond, Error::::CannotChillOther); + } + + Self::chill_stash(&stash); + Ok(()) + } } } @@ -2296,8 +2487,8 @@ impl Pallet { /// Chill a stash account. fn chill_stash(stash: &T::AccountId) { - >::remove(stash); - >::remove(stash); + Self::do_remove_validator(stash); + Self::do_remove_nominator(stash); } /// Actually make a payment to a staker. This uses the currency's reward function @@ -2645,8 +2836,8 @@ impl Pallet { >::remove(&controller); >::remove(stash); - >::remove(stash); - >::remove(stash); + Self::do_remove_validator(stash); + Self::do_remove_nominator(stash); frame_system::Pallet::::dec_consumers(stash); @@ -2749,7 +2940,7 @@ impl Pallet { // Collect all slashing spans into a BTreeMap for further queries. let slashing_spans = >::iter().collect::>(); - for (nominator, nominations) in >::iter() { + for (nominator, nominations) in Nominators::::iter() { let Nominations { submitted_in, mut targets, suppressed: _ } = nominations; // Filter out nomination targets which were nominated before the most recent @@ -2769,8 +2960,49 @@ impl Pallet { all_voters } + /// This is a very expensive function and result should be cached versus being called multiple times. pub fn get_npos_targets() -> Vec { - >::iter().map(|(v, _)| v).collect::>() + Validators::::iter().map(|(v, _)| v).collect::>() + } + + /// This function will add a nominator to the `Nominators` storage map, + /// and keep track of the `CurrentNominatorsCount`. + /// + /// If the nominator already exists, their nominations will be updated. 
+ pub fn do_add_nominator(who: &T::AccountId, nominations: Nominations) { + if !Nominators::::contains_key(who) { + CurrentNominatorsCount::::mutate(|x| x.saturating_inc()) + } + Nominators::::insert(who, nominations); + } + + /// This function will remove a nominator from the `Nominators` storage map, + /// and keep track of the `CurrentNominatorsCount`. + pub fn do_remove_nominator(who: &T::AccountId) { + if Nominators::::contains_key(who) { + Nominators::::remove(who); + CurrentNominatorsCount::::mutate(|x| x.saturating_dec()); + } + } + + /// This function will add a validator to the `Validators` storage map, + /// and keep track of the `CurrentValidatorsCount`. + /// + /// If the validator already exists, their preferences will be updated. + pub fn do_add_validator(who: &T::AccountId, prefs: ValidatorPrefs) { + if !Validators::::contains_key(who) { + CurrentValidatorsCount::::mutate(|x| x.saturating_inc()) + } + Validators::::insert(who, prefs); + } + + /// This function will remove a validator from the `Validators` storage map, + /// and keep track of the `CurrentValidatorsCount`. + pub fn do_remove_validator(who: &T::AccountId) { + if Validators::::contains_key(who) { + Validators::::remove(who); + CurrentValidatorsCount::::mutate(|x| x.saturating_dec()); + } } } @@ -2785,12 +3017,11 @@ impl frame_election_provider_support::ElectionDataProvider, ) -> data_provider::Result<(Vec<(T::AccountId, VoteWeight, Vec)>, Weight)> { - // NOTE: reading these counts already needs to iterate a lot of storage keys, but they get - // cached. This is okay for the case of `Ok(_)`, but bad for `Err(_)`, as the trait does not - // report weight in failures. 
- let nominator_count = >::iter().count(); - let validator_count = >::iter().count(); - let voter_count = nominator_count.saturating_add(validator_count); + let nominator_count = CurrentNominatorsCount::::get(); + let validator_count = CurrentValidatorsCount::::get(); + let voter_count = nominator_count.saturating_add(validator_count) as usize; + debug_assert!(>::iter().count() as u32 == CurrentNominatorsCount::::get()); + debug_assert!(>::iter().count() as u32 == CurrentValidatorsCount::::get()); if maybe_max_len.map_or(false, |max_len| voter_count > max_len) { return Err("Voter snapshot too big"); @@ -2798,15 +3029,15 @@ impl frame_election_provider_support::ElectionDataProvider>::iter().count(); let weight = T::WeightInfo::get_npos_voters( - validator_count as u32, - nominator_count as u32, + nominator_count, + validator_count, slashing_span_count as u32, ); Ok((Self::get_npos_voters(), weight)) } fn targets(maybe_max_len: Option) -> data_provider::Result<(Vec, Weight)> { - let target_count = >::iter().count(); + let target_count = CurrentValidatorsCount::::get() as usize; if maybe_max_len.map_or(false, |max_len| target_count > max_len) { return Err("Target snapshot too big"); @@ -2858,7 +3089,7 @@ impl frame_election_provider_support::ElectionDataProvider = target_stake .and_then(|w| >::try_from(w).ok()) - .unwrap_or(T::Currency::minimum_balance() * 100u32.into()); + .unwrap_or(MinNominatorBond::::get() * 100u32.into()); >::insert(v.clone(), v.clone()); >::insert( v.clone(), @@ -2870,8 +3101,8 @@ impl frame_election_provider_support::ElectionDataProvider>::insert( - v, + Self::do_add_validator( + &v, ValidatorPrefs { commission: Perbill::zero(), blocked: false }, ); }); @@ -2891,8 +3122,8 @@ impl frame_election_provider_support::ElectionDataProvider>::insert( - v, + Self::do_add_nominator( + &v, Nominations { targets: t, submitted_in: 0, suppressed: false }, ); }); diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index 
f58cdf0d2350f..35a1fa45284da 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -242,6 +242,7 @@ impl onchain::Config for Test { type Accuracy = Perbill; type DataProvider = Staking; } + impl Config for Test { const MAX_NOMINATIONS: u32 = 16; type Currency = Balances; @@ -286,6 +287,8 @@ pub struct ExtBuilder { invulnerables: Vec, has_stakers: bool, initialize_first_session: bool, + min_nominator_bond: Balance, + min_validator_bond: Balance, } impl Default for ExtBuilder { @@ -300,6 +303,8 @@ impl Default for ExtBuilder { invulnerables: vec![], has_stakers: true, initialize_first_session: true, + min_nominator_bond: ExistentialDeposit::get(), + min_validator_bond: ExistentialDeposit::get(), } } } @@ -361,7 +366,15 @@ impl ExtBuilder { OFFSET.with(|v| *v.borrow_mut() = offset); self } - pub fn build(self) -> sp_io::TestExternalities { + pub fn min_nominator_bond(mut self, amount: Balance) -> Self { + self.min_nominator_bond = amount; + self + } + pub fn min_validator_bond(mut self, amount: Balance) -> Self { + self.min_validator_bond = amount; + self + } + fn build(self) -> sp_io::TestExternalities { sp_tracing::try_init_simple(); let mut storage = frame_system::GenesisConfig::default() .build_storage::() @@ -434,6 +447,8 @@ impl ExtBuilder { minimum_validator_count: self.minimum_validator_count, invulnerables: self.invulnerables, slash_reward_fraction: Perbill::from_percent(10), + min_nominator_bond: self.min_nominator_bond, + min_validator_bond: self.min_validator_bond, ..Default::default() } .assimilate_storage(&mut storage); @@ -477,6 +492,14 @@ fn post_conditions() { check_nominators(); check_exposures(); check_ledgers(); + check_count(); +} + +fn check_count() { + let nominator_count = Nominators::::iter().count() as u32; + let validator_count = Validators::::iter().count() as u32; + assert_eq!(nominator_count, CurrentNominatorsCount::::get()); + assert_eq!(validator_count, CurrentValidatorsCount::::get()); } fn check_ledgers() { diff 
--git a/frame/staking/src/testing_utils.rs b/frame/staking/src/testing_utils.rs index 185b96983ab94..8a4392edfed25 100644 --- a/frame/staking/src/testing_utils.rs +++ b/frame/staking/src/testing_utils.rs @@ -29,8 +29,10 @@ const SEED: u32 = 0; /// This function removes all validators and nominators from storage. pub fn clear_validators_and_nominators() { - Validators::::remove_all(); - Nominators::::remove_all(); + Validators::::remove_all(None); + CurrentValidatorsCount::::kill(); + Nominators::::remove_all(None); + CurrentNominatorsCount::::kill(); } /// Grab a funded user. diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index ee8f78769e70a..976ee34d9b8eb 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -297,8 +297,7 @@ fn staking_should_work() { ExtBuilder::default() .nominate(false) .fair(false) // to give 20 more staked value - .build() - .execute_with(|| { + .build_and_execute(|| { // remember + compare this along with the test. assert_eq_uvec!(validator_controllers(), vec![20, 10]); @@ -374,8 +373,7 @@ fn blocking_and_kicking_works() { .validator_count(4) .nominate(true) .num_validators(3) - .build() - .execute_with(|| { + .build_and_execute(|| { // block validator 10/11 assert_ok!(Staking::validate(Origin::signed(10), ValidatorPrefs { blocked: true, .. Default::default() })); // attempt to nominate from 100/101... 
@@ -398,8 +396,7 @@ fn less_than_needed_candidates_works() { .validator_count(4) .nominate(false) .num_validators(3) - .build() - .execute_with(|| { + .build_and_execute(|| { assert_eq!(Staking::validator_count(), 4); assert_eq!(Staking::minimum_validator_count(), 1); assert_eq_uvec!(validator_controllers(), vec![30, 20, 10]); @@ -426,8 +423,7 @@ fn no_candidate_emergency_condition() { .num_validators(4) .validator_pool(true) .nominate(false) - .build() - .execute_with(|| { + .build_and_execute(|| { // initial validators assert_eq_uvec!(validator_controllers(), vec![10, 20, 30, 40]); let prefs = ValidatorPrefs { commission: Perbill::one(), .. Default::default() }; @@ -468,8 +464,7 @@ fn nominating_and_rewards_should_work() { ExtBuilder::default() .nominate(false) .validator_pool(true) - .build() - .execute_with(|| { + .build_and_execute(|| { // initial validators -- everyone is actually even. assert_eq_uvec!(validator_controllers(), vec![40, 30]); @@ -1254,8 +1249,7 @@ fn rebond_works() { // * it can re-bond a portion of the funds scheduled to unlock. ExtBuilder::default() .nominate(false) - .build() - .execute_with(|| { + .build_and_execute(|| { // Set payee to controller. avoids confusion assert_ok!(Staking::set_payee( Origin::signed(10), @@ -1399,8 +1393,7 @@ fn rebond_is_fifo() { // Rebond should proceed by reversing the most recent bond operations. ExtBuilder::default() .nominate(false) - .build() - .execute_with(|| { + .build_and_execute(|| { // Set payee to controller. 
avoids confusion assert_ok!(Staking::set_payee( Origin::signed(10), @@ -1547,109 +1540,117 @@ fn reward_to_stake_works() { fn on_free_balance_zero_stash_removes_validator() { // Tests that validator storage items are cleaned up when stash is empty // Tests that storage items are untouched when controller is empty - ExtBuilder::default().existential_deposit(10).build_and_execute(|| { - // Check the balance of the validator account - assert_eq!(Balances::free_balance(10), 256); - // Check the balance of the stash account - assert_eq!(Balances::free_balance(11), 256000); - // Check these two accounts are bonded - assert_eq!(Staking::bonded(&11), Some(10)); - - // Set some storage items which we expect to be cleaned up - // Set payee information - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Stash)); - - // Check storage items that should be cleaned up - assert!(>::contains_key(&10)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - - // Reduce free_balance of controller to 0 - let _ = Balances::slash(&10, Balance::max_value()); - - // Check the balance of the stash account has not been touched - assert_eq!(Balances::free_balance(11), 256000); - // Check these two accounts are still bonded - assert_eq!(Staking::bonded(&11), Some(10)); - - // Check storage items have not changed - assert!(>::contains_key(&10)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - - // Reduce free_balance of stash to 0 - let _ = Balances::slash(&11, Balance::max_value()); - // Check total balance of stash - assert_eq!(Balances::total_balance(&11), 10); - - // Reap the stash - assert_ok!(Staking::reap_stash(Origin::none(), 11, 0)); - - // Check storage items do not exist - assert!(!>::contains_key(&10)); - assert!(!>::contains_key(&11)); - assert!(!>::contains_key(&11)); - assert!(!>::contains_key(&11)); - assert!(!>::contains_key(&11)); - }); + ExtBuilder::default() 
+ .existential_deposit(10) + .min_nominator_bond(10) + .min_validator_bond(10) + .build_and_execute(|| { + // Check the balance of the validator account + assert_eq!(Balances::free_balance(10), 256); + // Check the balance of the stash account + assert_eq!(Balances::free_balance(11), 256000); + // Check these two accounts are bonded + assert_eq!(Staking::bonded(&11), Some(10)); + + // Set some storage items which we expect to be cleaned up + // Set payee information + assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Stash)); + + // Check storage items that should be cleaned up + assert!(>::contains_key(&10)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + + // Reduce free_balance of controller to 0 + let _ = Balances::slash(&10, Balance::max_value()); + + // Check the balance of the stash account has not been touched + assert_eq!(Balances::free_balance(11), 256000); + // Check these two accounts are still bonded + assert_eq!(Staking::bonded(&11), Some(10)); + + // Check storage items have not changed + assert!(>::contains_key(&10)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + + // Reduce free_balance of stash to 0 + let _ = Balances::slash(&11, Balance::max_value()); + // Check total balance of stash + assert_eq!(Balances::total_balance(&11), 10); + + // Reap the stash + assert_ok!(Staking::reap_stash(Origin::none(), 11, 0)); + + // Check storage items do not exist + assert!(!>::contains_key(&10)); + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + }); } #[test] fn on_free_balance_zero_stash_removes_nominator() { // Tests that nominator storage items are cleaned up when stash is empty // Tests that storage items are untouched when controller is empty - ExtBuilder::default().existential_deposit(10).build_and_execute(|| { - // Make 10 a nominator - 
assert_ok!(Staking::nominate(Origin::signed(10), vec![20])); - // Check that account 10 is a nominator - assert!(>::contains_key(11)); - // Check the balance of the nominator account - assert_eq!(Balances::free_balance(10), 256); - // Check the balance of the stash account - assert_eq!(Balances::free_balance(11), 256000); - - // Set payee information - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Stash)); - - // Check storage items that should be cleaned up - assert!(>::contains_key(&10)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - - // Reduce free_balance of controller to 0 - let _ = Balances::slash(&10, Balance::max_value()); - // Check total balance of account 10 - assert_eq!(Balances::total_balance(&10), 0); - - // Check the balance of the stash account has not been touched - assert_eq!(Balances::free_balance(11), 256000); - // Check these two accounts are still bonded - assert_eq!(Staking::bonded(&11), Some(10)); - - // Check storage items have not changed - assert!(>::contains_key(&10)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - - // Reduce free_balance of stash to 0 - let _ = Balances::slash(&11, Balance::max_value()); - // Check total balance of stash - assert_eq!(Balances::total_balance(&11), 10); - - // Reap the stash - assert_ok!(Staking::reap_stash(Origin::none(), 11, 0)); - - // Check storage items do not exist - assert!(!>::contains_key(&10)); - assert!(!>::contains_key(&11)); - assert!(!>::contains_key(&11)); - assert!(!>::contains_key(&11)); - assert!(!>::contains_key(&11)); - }); + ExtBuilder::default() + .existential_deposit(10) + .min_nominator_bond(10) + .min_validator_bond(10) + .build_and_execute(|| { + // Make 10 a nominator + assert_ok!(Staking::nominate(Origin::signed(10), vec![20])); + // Check that account 10 is a nominator + assert!(>::contains_key(11)); + // Check the balance of the nominator 
account + assert_eq!(Balances::free_balance(10), 256); + // Check the balance of the stash account + assert_eq!(Balances::free_balance(11), 256000); + + // Set payee information + assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Stash)); + + // Check storage items that should be cleaned up + assert!(>::contains_key(&10)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + + // Reduce free_balance of controller to 0 + let _ = Balances::slash(&10, Balance::max_value()); + // Check total balance of account 10 + assert_eq!(Balances::total_balance(&10), 0); + + // Check the balance of the stash account has not been touched + assert_eq!(Balances::free_balance(11), 256000); + // Check these two accounts are still bonded + assert_eq!(Staking::bonded(&11), Some(10)); + + // Check storage items have not changed + assert!(>::contains_key(&10)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + + // Reduce free_balance of stash to 0 + let _ = Balances::slash(&11, Balance::max_value()); + // Check total balance of stash + assert_eq!(Balances::total_balance(&11), 10); + + // Reap the stash + assert_ok!(Staking::reap_stash(Origin::none(), 11, 0)); + + // Check storage items do not exist + assert!(!>::contains_key(&10)); + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + }); } @@ -1725,14 +1726,15 @@ fn bond_with_no_staked_value() { ExtBuilder::default() .validator_count(3) .existential_deposit(5) + .min_nominator_bond(5) + .min_validator_bond(5) .nominate(false) .minimum_validator_count(1) - .build() - .execute_with(|| { + .build_and_execute(|| { // Can't bond with 1 assert_noop!( Staking::bond(Origin::signed(1), 2, 1, RewardDestination::Controller), - Error::::InsufficientValue, + Error::::InsufficientBond, ); // bonded with absolute minimum value possible. 
assert_ok!(Staking::bond(Origin::signed(1), 2, 5, RewardDestination::Controller)); @@ -1774,8 +1776,7 @@ fn bond_with_little_staked_value_bounded() { .validator_count(3) .nominate(false) .minimum_validator_count(1) - .build() - .execute_with(|| { + .build_and_execute(|| { // setup assert_ok!(Staking::chill(Origin::signed(30))); assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); @@ -1828,8 +1829,7 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider() { .validator_count(2) .nominate(false) .minimum_validator_count(1) - .build() - .execute_with(|| { + .build_and_execute(|| { // disable the nominator assert_ok!(Staking::chill(Origin::signed(100))); // make stakes equal. @@ -1876,8 +1876,7 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider_elected() { .validator_count(2) .nominate(false) .minimum_validator_count(1) - .build() - .execute_with(|| { + .build_and_execute(|| { // disable the nominator assert_ok!(Staking::chill(Origin::signed(100))); // 31/30 will have less stake @@ -1923,8 +1922,7 @@ fn new_era_elects_correct_number_of_validators() { .validator_pool(true) .fair(true) .validator_count(1) - .build() - .execute_with(|| { + .build_and_execute(|| { assert_eq!(Staking::validator_count(), 1); assert_eq!(validator_controllers().len(), 1); @@ -2466,7 +2464,11 @@ fn only_slash_for_max_in_era() { #[test] fn garbage_collection_after_slashing() { // ensures that `SlashingSpans` and `SpanSlash` of an account is removed after reaping. 
- ExtBuilder::default().existential_deposit(2).build_and_execute(|| { + ExtBuilder::default() + .existential_deposit(2) + .min_nominator_bond(2) + .min_validator_bond(2) + .build_and_execute(|| { assert_eq!(Balances::free_balance(11), 256_000); on_offence_now( @@ -3723,6 +3725,8 @@ fn session_buffering_no_offset() { fn cannot_rebond_to_lower_than_ed() { ExtBuilder::default() .existential_deposit(10) + .min_nominator_bond(10) + .min_validator_bond(10) .build_and_execute(|| { // stash must have more balance than bonded for this to work. assert_eq!(Balances::free_balance(&21), 512_000); @@ -3739,7 +3743,8 @@ fn cannot_rebond_to_lower_than_ed() { } ); - // unbond all of it. + // unbond all of it. must be chilled first. + assert_ok!(Staking::chill(Origin::signed(20))); assert_ok!(Staking::unbond(Origin::signed(20), 1000)); assert_eq!( Staking::ledger(&20).unwrap(), @@ -3755,7 +3760,7 @@ fn cannot_rebond_to_lower_than_ed() { // now bond a wee bit more assert_noop!( Staking::rebond(Origin::signed(20), 5), - Error::::InsufficientValue, + Error::::InsufficientBond, ); }) } @@ -3764,6 +3769,8 @@ fn cannot_rebond_to_lower_than_ed() { fn cannot_bond_extra_to_lower_than_ed() { ExtBuilder::default() .existential_deposit(10) + .min_nominator_bond(10) + .min_validator_bond(10) .build_and_execute(|| { // stash must have more balance than bonded for this to work. assert_eq!(Balances::free_balance(&21), 512_000); @@ -3780,7 +3787,8 @@ fn cannot_bond_extra_to_lower_than_ed() { } ); - // unbond all of it. + // unbond all of it. must be chilled first. 
+ assert_ok!(Staking::chill(Origin::signed(20))); assert_ok!(Staking::unbond(Origin::signed(20), 1000)); assert_eq!( Staking::ledger(&20).unwrap(), @@ -3799,7 +3807,7 @@ fn cannot_bond_extra_to_lower_than_ed() { // now bond a wee bit more assert_noop!( Staking::bond_extra(Origin::signed(21), 5), - Error::::InsufficientValue, + Error::::InsufficientBond, ); }) } @@ -3809,6 +3817,8 @@ fn do_not_die_when_active_is_ed() { let ed = 10; ExtBuilder::default() .existential_deposit(ed) + .min_nominator_bond(ed) + .min_validator_bond(ed) .build_and_execute(|| { // initial stuff. assert_eq!( @@ -3888,7 +3898,7 @@ mod election_data_provider { #[test] fn voters_include_self_vote() { - ExtBuilder::default().nominate(false).build().execute_with(|| { + ExtBuilder::default().nominate(false).build_and_execute(|| { assert!(>::iter().map(|(x, _)| x).all(|v| Staking::voters(None) .unwrap() .0 @@ -3900,7 +3910,7 @@ mod election_data_provider { #[test] fn voters_exclude_slashed() { - ExtBuilder::default().build().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); assert_eq!( >::voters(None) @@ -3946,7 +3956,7 @@ mod election_data_provider { #[test] fn respects_len_limits() { - ExtBuilder::default().build().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { assert_eq!(Staking::voters(Some(1)).unwrap_err(), "Voter snapshot too big"); assert_eq!(Staking::targets(Some(1)).unwrap_err(), "Target snapshot too big"); }); @@ -3954,7 +3964,7 @@ mod election_data_provider { #[test] fn estimate_next_election_works() { - ExtBuilder::default().session_per_era(5).period(5).build().execute_with(|| { + ExtBuilder::default().session_per_era(5).period(5).build_and_execute(|| { // first session is always length 0. 
for b in 1..20 { run_to_block(b); @@ -4013,4 +4023,129 @@ mod election_data_provider { assert_eq!(ForceEra::::get(), Forcing::NotForcing); }) } + + #[test] + #[should_panic] + fn count_check_works() { + ExtBuilder::default().build_and_execute(|| { + // We should never insert into the validators or nominators map directly as this will + // not keep track of the count. This test should panic as we verify the count is accurate + // after every test using the `post_checks` in `mock`. + Validators::::insert(987654321, ValidatorPrefs::default()); + Nominators::::insert(987654321, Nominations { + targets: vec![], + submitted_in: Default::default(), + suppressed: false, + }); + }) + } + + #[test] + fn min_bond_checks_work() { + ExtBuilder::default() + .existential_deposit(100) + .min_nominator_bond(1_000) + .min_validator_bond(1_500) + .build_and_execute(|| { + // 500 is not enough for any role + assert_ok!(Staking::bond(Origin::signed(3), 4, 500, RewardDestination::Controller)); + assert_noop!(Staking::nominate(Origin::signed(4), vec![1]), Error::::InsufficientBond); + assert_noop!(Staking::validate(Origin::signed(4), ValidatorPrefs::default()), Error::::InsufficientBond); + + // 1000 is enough for nominator + assert_ok!(Staking::bond_extra(Origin::signed(3), 500)); + assert_ok!(Staking::nominate(Origin::signed(4), vec![1])); + assert_noop!(Staking::validate(Origin::signed(4), ValidatorPrefs::default()), Error::::InsufficientBond); + + // 1500 is enough for validator + assert_ok!(Staking::bond_extra(Origin::signed(3), 500)); + assert_ok!(Staking::nominate(Origin::signed(4), vec![1])); + assert_ok!(Staking::validate(Origin::signed(4), ValidatorPrefs::default())); + + // Can't unbond anything as validator + assert_noop!(Staking::unbond(Origin::signed(4), 500), Error::::InsufficientBond); + + // Once they are a nominator, they can unbond 500 + assert_ok!(Staking::nominate(Origin::signed(4), vec![1])); + assert_ok!(Staking::unbond(Origin::signed(4), 500)); + 
assert_noop!(Staking::unbond(Origin::signed(4), 500), Error::::InsufficientBond); + + // Once they are chilled they can unbond everything + assert_ok!(Staking::chill(Origin::signed(4))); + assert_ok!(Staking::unbond(Origin::signed(4), 1000)); + }) + } + + #[test] + fn chill_other_works() { + ExtBuilder::default() + .existential_deposit(100) + .min_nominator_bond(1_000) + .min_validator_bond(1_500) + .build_and_execute(|| { + // Nominator + assert_ok!(Staking::bond(Origin::signed(1), 2, 1000, RewardDestination::Controller)); + assert_ok!(Staking::nominate(Origin::signed(2), vec![1])); + + // Validator + assert_ok!(Staking::bond(Origin::signed(3), 4, 1500, RewardDestination::Controller)); + assert_ok!(Staking::validate(Origin::signed(4), ValidatorPrefs::default())); + + // Can't chill these users + assert_noop!(Staking::chill_other(Origin::signed(1), 2), Error::::CannotChillOther); + assert_noop!(Staking::chill_other(Origin::signed(1), 4), Error::::CannotChillOther); + + // Change the minimum bond + assert_ok!(Staking::update_staking_limits(Origin::root(), 1_500, 2_000, None, None)); + + // Users can now be chilled + assert_ok!(Staking::chill_other(Origin::signed(1), 2)); + assert_ok!(Staking::chill_other(Origin::signed(1), 4)); + }) + } + + #[test] + fn capped_stakers_works() { + ExtBuilder::default().build_and_execute(|| { + let validator_count = CurrentValidatorsCount::::get(); + assert_eq!(validator_count, 3); + let nominator_count = CurrentNominatorsCount::::get(); + assert_eq!(nominator_count, 1); + + // Change the maximums + let max = 10; + assert_ok!(Staking::update_staking_limits(Origin::root(), 10, 10, Some(max), Some(max))); + + // can create `max - validator_count` validators + assert_ok!(testing_utils::create_validators::(max - validator_count, 100)); + + // but no more + let (_, last_validator) = testing_utils::create_stash_controller::( + 1337, 100, RewardDestination::Controller, + ).unwrap(); + assert_noop!( + 
Staking::validate(Origin::signed(last_validator), ValidatorPrefs::default()), + Error::::TooManyValidators, + ); + + // same with nominators + for i in 0 .. max - nominator_count { + let (_, controller) = testing_utils::create_stash_controller::( + i + 10_000_000, 100, RewardDestination::Controller, + ).unwrap(); + assert_ok!(Staking::nominate(Origin::signed(controller), vec![1])); + } + + // one more is too many + let (_, last_nominator) = testing_utils::create_stash_controller::( + 20_000_000, 100, RewardDestination::Controller, + ).unwrap(); + assert_noop!(Staking::nominate(Origin::signed(last_nominator), vec![1]), Error::::TooManyNominators); + + // No problem when we set to `None` again + assert_ok!(Staking::update_staking_limits(Origin::root(), 10, 10, None, None)); + assert_ok!(Staking::nominate(Origin::signed(last_nominator), vec![1])); + assert_ok!(Staking::validate(Origin::signed(last_validator), ValidatorPrefs::default())); + }) + } } diff --git a/frame/staking/src/weights.rs b/frame/staking/src/weights.rs index 5960d6612566e..980b0855fbd81 100644 --- a/frame/staking/src/weights.rs +++ b/frame/staking/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_staking //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-06-07, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-06-15, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -70,365 +70,383 @@ pub trait WeightInfo { fn new_era(v: u32, n: u32, ) -> Weight; fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight; fn get_npos_targets(v: u32, ) -> Weight; + fn update_staking_limits() -> Weight; + fn chill_other() -> Weight; } /// Weights for pallet_staking using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn bond() -> Weight { - (91_959_000 as Weight) + (91_278_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn bond_extra() -> Weight { - (69_291_000 as Weight) + (69_833_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn unbond() -> Weight { - (63_513_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) + (75_020_000 as Weight) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_update(s: u32, ) -> Weight { - (64_747_000 as Weight) - // Standard Error: 0 - .saturating_add((77_000 as Weight).saturating_mul(s as Weight)) + (63_898_000 as Weight) + // Standard Error: 1_000 + .saturating_add((50_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_kill(s: u32, ) -> Weight { - (100_375_000 as Weight) + (103_717_000 as Weight) // Standard Error: 1_000 - .saturating_add((3_067_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(8 as Weight)) + .saturating_add((2_942_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(8 as Weight)) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn validate() -> Weight { - (17_849_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) + (40_702_000 as Weight) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn kick(k: u32, ) -> Weight { - (27_939_000 as Weight) - // Standard 
Error: 16_000 - .saturating_add((21_431_000 as Weight).saturating_mul(k as Weight)) + (33_572_000 as Weight) + // Standard Error: 18_000 + .saturating_add((20_771_000 as Weight).saturating_mul(k as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(k as Weight))) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } fn nominate(n: u32, ) -> Weight { - (32_791_000 as Weight) - // Standard Error: 33_000 - .saturating_add((7_006_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) + (53_561_000 as Weight) + // Standard Error: 34_000 + .saturating_add((6_652_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn chill() -> Weight { - (17_014_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + (21_489_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) } fn set_payee() -> Weight { - (14_816_000 as Weight) + (14_514_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_controller() -> Weight { - (33_600_000 as Weight) + (32_598_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn set_validator_count() -> Weight { - (2_706_000 as Weight) + (2_477_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_no_eras() -> Weight { - (2_973_000 as Weight) + (2_743_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_new_era() -> Weight { - (2_949_000 as Weight) + (2_784_000 as Weight) 
.saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_new_era_always() -> Weight { - (3_011_000 as Weight) + (2_749_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_invulnerables(v: u32, ) -> Weight { - (3_078_000 as Weight) + (2_798_000 as Weight) // Standard Error: 0 .saturating_add((5_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_unstake(s: u32, ) -> Weight { - (69_220_000 as Weight) - // Standard Error: 1_000 - .saturating_add((3_070_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(8 as Weight)) + (70_372_000 as Weight) + // Standard Error: 13_000 + .saturating_add((3_029_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn cancel_deferred_slash(s: u32, ) -> Weight { - (3_460_399_000 as Weight) - // Standard Error: 222_000 - .saturating_add((19_782_000 as Weight).saturating_mul(s as Weight)) + (3_436_822_000 as Weight) + // Standard Error: 221_000 + .saturating_add((19_799_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn payout_stakers_dead_controller(n: u32, ) -> Weight { - (120_436_000 as Weight) + (132_018_000 as Weight) // Standard Error: 27_000 - .saturating_add((63_092_000 as Weight).saturating_mul(n as Weight)) + .saturating_add((61_340_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(10 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(n as 
Weight))) } fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (181_424_000 as Weight) - // Standard Error: 51_000 - .saturating_add((78_631_000 as Weight).saturating_mul(n as Weight)) + (158_346_000 as Weight) + // Standard Error: 61_000 + .saturating_add((77_147_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(11 as Weight)) .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(3 as Weight)) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) } fn rebond(l: u32, ) -> Weight { - (59_349_000 as Weight) + (57_756_000 as Weight) // Standard Error: 2_000 - .saturating_add((64_000 as Weight).saturating_mul(l as Weight)) + .saturating_add((79_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn set_history_depth(e: u32, ) -> Weight { (0 as Weight) - // Standard Error: 97_000 - .saturating_add((44_609_000 as Weight).saturating_mul(e as Weight)) + // Standard Error: 100_000 + .saturating_add((44_873_000 as Weight).saturating_mul(e as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) .saturating_add(T::DbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) } fn reap_stash(s: u32, ) -> Weight { - (72_356_000 as Weight) - // Standard Error: 2_000 - .saturating_add((3_066_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(8 as Weight)) + (75_073_000 as Weight) + // Standard Error: 4_000 + .saturating_add((2_988_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } 
fn new_era(v: u32, n: u32, ) -> Weight { (0 as Weight) - // Standard Error: 1_462_000 - .saturating_add((393_007_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 73_000 - .saturating_add((72_014_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 1_146_000 + .saturating_add((362_986_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 57_000 + .saturating_add((60_216_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(10 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(9 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) } fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { (0 as Weight) - // Standard Error: 235_000 - .saturating_add((35_212_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 235_000 - .saturating_add((38_391_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 3_200_000 - .saturating_add((31_130_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 230_000 + .saturating_add((35_891_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 230_000 + .saturating_add((37_854_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 7_842_000 + .saturating_add((32_492_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) } fn get_npos_targets(v: u32, ) -> Weight { - (52_314_000 as Weight) - // Standard Error: 71_000 - .saturating_add((15_195_000 as 
Weight).saturating_mul(v as Weight)) + (0 as Weight) + // Standard Error: 74_000 + .saturating_add((16_370_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) } + fn update_staking_limits() -> Weight { + (6_398_000 as Weight) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + } + fn chill_other() -> Weight { + (44_694_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } } // For backwards compatibility and tests impl WeightInfo for () { fn bond() -> Weight { - (91_959_000 as Weight) + (91_278_000 as Weight) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn bond_extra() -> Weight { - (69_291_000 as Weight) + (69_833_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn unbond() -> Weight { - (63_513_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + (75_020_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_update(s: u32, ) -> Weight { - (64_747_000 as Weight) - // Standard Error: 0 - .saturating_add((77_000 as Weight).saturating_mul(s as Weight)) + (63_898_000 as Weight) + // Standard Error: 1_000 + .saturating_add((50_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_kill(s: u32, ) -> Weight { - (100_375_000 as Weight) + (103_717_000 as Weight) // Standard Error: 1_000 - .saturating_add((3_067_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(8 
as Weight)) + .saturating_add((2_942_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(8 as Weight)) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn validate() -> Weight { - (17_849_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + (40_702_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn kick(k: u32, ) -> Weight { - (27_939_000 as Weight) - // Standard Error: 16_000 - .saturating_add((21_431_000 as Weight).saturating_mul(k as Weight)) + (33_572_000 as Weight) + // Standard Error: 18_000 + .saturating_add((20_771_000 as Weight).saturating_mul(k as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(k as Weight))) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } fn nominate(n: u32, ) -> Weight { - (32_791_000 as Weight) - // Standard Error: 33_000 - .saturating_add((7_006_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + (53_561_000 as Weight) + // Standard Error: 34_000 + .saturating_add((6_652_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn chill() -> Weight { - (17_014_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + (21_489_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) } fn set_payee() -> Weight { - (14_816_000 as Weight) + (14_514_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) 
.saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn set_controller() -> Weight { - (33_600_000 as Weight) + (32_598_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn set_validator_count() -> Weight { - (2_706_000 as Weight) + (2_477_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_no_eras() -> Weight { - (2_973_000 as Weight) + (2_743_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_new_era() -> Weight { - (2_949_000 as Weight) + (2_784_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_new_era_always() -> Weight { - (3_011_000 as Weight) + (2_749_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn set_invulnerables(v: u32, ) -> Weight { - (3_078_000 as Weight) + (2_798_000 as Weight) // Standard Error: 0 .saturating_add((5_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_unstake(s: u32, ) -> Weight { - (69_220_000 as Weight) - // Standard Error: 1_000 - .saturating_add((3_070_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(8 as Weight)) + (70_372_000 as Weight) + // Standard Error: 13_000 + .saturating_add((3_029_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn cancel_deferred_slash(s: u32, ) -> Weight { - (3_460_399_000 as Weight) - // Standard Error: 222_000 - .saturating_add((19_782_000 as Weight).saturating_mul(s as Weight)) + (3_436_822_000 as Weight) + // Standard Error: 221_000 + .saturating_add((19_799_000 as Weight).saturating_mul(s as Weight)) 
.saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn payout_stakers_dead_controller(n: u32, ) -> Weight { - (120_436_000 as Weight) + (132_018_000 as Weight) // Standard Error: 27_000 - .saturating_add((63_092_000 as Weight).saturating_mul(n as Weight)) + .saturating_add((61_340_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(10 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) } fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (181_424_000 as Weight) - // Standard Error: 51_000 - .saturating_add((78_631_000 as Weight).saturating_mul(n as Weight)) + (158_346_000 as Weight) + // Standard Error: 61_000 + .saturating_add((77_147_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(11 as Weight)) .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) } fn rebond(l: u32, ) -> Weight { - (59_349_000 as Weight) + (57_756_000 as Weight) // Standard Error: 2_000 - .saturating_add((64_000 as Weight).saturating_mul(l as Weight)) + .saturating_add((79_000 as Weight).saturating_mul(l as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn set_history_depth(e: u32, ) -> Weight { (0 as Weight) - // Standard Error: 97_000 - .saturating_add((44_609_000 as Weight).saturating_mul(e as Weight)) + // Standard Error: 100_000 + .saturating_add((44_873_000 as Weight).saturating_mul(e as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as 
Weight)) .saturating_add(RocksDbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) } fn reap_stash(s: u32, ) -> Weight { - (72_356_000 as Weight) - // Standard Error: 2_000 - .saturating_add((3_066_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(8 as Weight)) + (75_073_000 as Weight) + // Standard Error: 4_000 + .saturating_add((2_988_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn new_era(v: u32, n: u32, ) -> Weight { (0 as Weight) - // Standard Error: 1_462_000 - .saturating_add((393_007_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 73_000 - .saturating_add((72_014_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 1_146_000 + .saturating_add((362_986_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 57_000 + .saturating_add((60_216_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(10 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().writes(9 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) } fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { (0 as Weight) - // Standard Error: 235_000 - .saturating_add((35_212_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 235_000 - .saturating_add((38_391_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 3_200_000 - .saturating_add((31_130_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 
230_000 + .saturating_add((35_891_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 230_000 + .saturating_add((37_854_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 7_842_000 + .saturating_add((32_492_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) } fn get_npos_targets(v: u32, ) -> Weight { - (52_314_000 as Weight) - // Standard Error: 71_000 - .saturating_add((15_195_000 as Weight).saturating_mul(v as Weight)) + (0 as Weight) + // Standard Error: 74_000 + .saturating_add((16_370_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) } + fn update_staking_limits() -> Weight { + (6_398_000 as Weight) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + fn chill_other() -> Weight { + (44_694_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } } From 811cbb821c08f34167ecca7cc54921fb18a771b4 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Wed, 16 Jun 2021 15:19:10 +0100 Subject: [PATCH 53/61] Make backwards compatible with CountedMap (#9126) --- frame/staking/src/lib.rs | 46 +++++++++++++++--------------- frame/staking/src/mock.rs | 4 +-- frame/staking/src/testing_utils.rs | 4 +-- frame/staking/src/tests.rs | 4 +-- 4 files changed, 29 insertions(+), 29 deletions(-) diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index aa66efc3cce0c..70e806f57be46 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -761,8 +761,8 @@ pub mod migrations { use super::*; pub fn 
pre_migrate() -> Result<(), &'static str> { - assert!(CurrentValidatorsCount::::get().is_zero(), "CurrentValidatorsCount already set."); - assert!(CurrentNominatorsCount::::get().is_zero(), "CurrentNominatorsCount already set."); + assert!(CounterForValidators::::get().is_zero(), "CounterForValidators already set."); + assert!(CounterForNominators::::get().is_zero(), "CounterForNominators already set."); assert!(StorageVersion::::get() == Releases::V6_0_0); Ok(()) } @@ -772,8 +772,8 @@ pub mod migrations { let validator_count = Validators::::iter().count() as u32; let nominator_count = Nominators::::iter().count() as u32; - CurrentValidatorsCount::::put(validator_count); - CurrentNominatorsCount::::put(nominator_count); + CounterForValidators::::put(validator_count); + CounterForNominators::::put(nominator_count); StorageVersion::::put(Releases::V7_0_0); log!(info, "Completed staking migration to Releases::V7_0_0"); @@ -998,14 +998,14 @@ pub mod pallet { /// The map from (wannabe) validator stash key to the preferences of that validator. /// - /// When updating this storage item, you must also update the `CurrentValidatorsCount`. + /// When updating this storage item, you must also update the `CounterForValidators`. #[pallet::storage] #[pallet::getter(fn validators)] pub type Validators = StorageMap<_, Twox64Concat, T::AccountId, ValidatorPrefs, ValueQuery>; /// A tracker to keep count of the number of items in the `Validators` map. #[pallet::storage] - pub type CurrentValidatorsCount = StorageValue<_, u32, ValueQuery>; + pub type CounterForValidators = StorageValue<_, u32, ValueQuery>; /// The maximum validator count before we stop allowing new validators to join. /// @@ -1015,14 +1015,14 @@ pub mod pallet { /// The map from nominator stash key to the set of stash keys of all validators to nominate. /// - /// When updating this storage item, you must also update the `CurrentNominatorsCount`. 
+ /// When updating this storage item, you must also update the `CounterForNominators`. #[pallet::storage] #[pallet::getter(fn nominators)] pub type Nominators = StorageMap<_, Twox64Concat, T::AccountId, Nominations>; /// A tracker to keep count of the number of items in the `Nominators` map. #[pallet::storage] - pub type CurrentNominatorsCount = StorageValue<_, u32, ValueQuery>; + pub type CounterForNominators = StorageValue<_, u32, ValueQuery>; /// The maximum nominator count before we stop allowing new validators to join. /// @@ -1717,7 +1717,7 @@ pub mod pallet { // If this error is reached, we need to adjust the `MinValidatorBond` and start calling `chill_other`. // Until then, we explicitly block new validators to protect the runtime. if let Some(max_validators) = MaxValidatorsCount::::get() { - ensure!(CurrentValidatorsCount::::get() < max_validators, Error::::TooManyValidators); + ensure!(CounterForValidators::::get() < max_validators, Error::::TooManyValidators); } let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; @@ -1758,7 +1758,7 @@ pub mod pallet { // If this error is reached, we need to adjust the `MinNominatorBond` and start calling `chill_other`. // Until then, we explicitly block new nominators to protect the runtime. if let Some(max_nominators) = MaxNominatorsCount::::get() { - ensure!(CurrentNominatorsCount::::get() < max_nominators, Error::::TooManyNominators); + ensure!(CounterForNominators::::get() < max_nominators, Error::::TooManyNominators); } let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; @@ -2966,42 +2966,42 @@ impl Pallet { } /// This function will add a nominator to the `Nominators` storage map, - /// and keep track of the `CurrentNominatorsCount`. + /// and keep track of the `CounterForNominators`. /// /// If the nominator already exists, their nominations will be updated. 
pub fn do_add_nominator(who: &T::AccountId, nominations: Nominations) { if !Nominators::::contains_key(who) { - CurrentNominatorsCount::::mutate(|x| x.saturating_inc()) + CounterForNominators::::mutate(|x| x.saturating_inc()) } Nominators::::insert(who, nominations); } /// This function will remove a nominator from the `Nominators` storage map, - /// and keep track of the `CurrentNominatorsCount`. + /// and keep track of the `CounterForNominators`. pub fn do_remove_nominator(who: &T::AccountId) { if Nominators::::contains_key(who) { Nominators::::remove(who); - CurrentNominatorsCount::::mutate(|x| x.saturating_dec()); + CounterForNominators::::mutate(|x| x.saturating_dec()); } } /// This function will add a validator to the `Validators` storage map, - /// and keep track of the `CurrentValidatorsCount`. + /// and keep track of the `CounterForValidators`. /// /// If the validator already exists, their preferences will be updated. pub fn do_add_validator(who: &T::AccountId, prefs: ValidatorPrefs) { if !Validators::::contains_key(who) { - CurrentValidatorsCount::::mutate(|x| x.saturating_inc()) + CounterForValidators::::mutate(|x| x.saturating_inc()) } Validators::::insert(who, prefs); } /// This function will remove a validator from the `Validators` storage map, - /// and keep track of the `CurrentValidatorsCount`. + /// and keep track of the `CounterForValidators`. 
pub fn do_remove_validator(who: &T::AccountId) { if Validators::::contains_key(who) { Validators::::remove(who); - CurrentValidatorsCount::::mutate(|x| x.saturating_dec()); + CounterForValidators::::mutate(|x| x.saturating_dec()); } } } @@ -3017,11 +3017,11 @@ impl frame_election_provider_support::ElectionDataProvider, ) -> data_provider::Result<(Vec<(T::AccountId, VoteWeight, Vec)>, Weight)> { - let nominator_count = CurrentNominatorsCount::::get(); - let validator_count = CurrentValidatorsCount::::get(); + let nominator_count = CounterForNominators::::get(); + let validator_count = CounterForValidators::::get(); let voter_count = nominator_count.saturating_add(validator_count) as usize; - debug_assert!(>::iter().count() as u32 == CurrentNominatorsCount::::get()); - debug_assert!(>::iter().count() as u32 == CurrentValidatorsCount::::get()); + debug_assert!(>::iter().count() as u32 == CounterForNominators::::get()); + debug_assert!(>::iter().count() as u32 == CounterForValidators::::get()); if maybe_max_len.map_or(false, |max_len| voter_count > max_len) { return Err("Voter snapshot too big"); @@ -3037,7 +3037,7 @@ impl frame_election_provider_support::ElectionDataProvider) -> data_provider::Result<(Vec, Weight)> { - let target_count = CurrentValidatorsCount::::get() as usize; + let target_count = CounterForValidators::::get() as usize; if maybe_max_len.map_or(false, |max_len| target_count > max_len) { return Err("Target snapshot too big"); diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index 35a1fa45284da..e0079cc3f375a 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -498,8 +498,8 @@ fn post_conditions() { fn check_count() { let nominator_count = Nominators::::iter().count() as u32; let validator_count = Validators::::iter().count() as u32; - assert_eq!(nominator_count, CurrentNominatorsCount::::get()); - assert_eq!(validator_count, CurrentValidatorsCount::::get()); + assert_eq!(nominator_count, 
CounterForNominators::::get()); + assert_eq!(validator_count, CounterForValidators::::get()); } fn check_ledgers() { diff --git a/frame/staking/src/testing_utils.rs b/frame/staking/src/testing_utils.rs index 8a4392edfed25..c643cb283373b 100644 --- a/frame/staking/src/testing_utils.rs +++ b/frame/staking/src/testing_utils.rs @@ -30,9 +30,9 @@ const SEED: u32 = 0; /// This function removes all validators and nominators from storage. pub fn clear_validators_and_nominators() { Validators::::remove_all(None); - CurrentValidatorsCount::::kill(); + CounterForValidators::::kill(); Nominators::::remove_all(None); - CurrentNominatorsCount::::kill(); + CounterForNominators::::kill(); } /// Grab a funded user. diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 976ee34d9b8eb..5d42d866b1336 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -4107,9 +4107,9 @@ mod election_data_provider { #[test] fn capped_stakers_works() { ExtBuilder::default().build_and_execute(|| { - let validator_count = CurrentValidatorsCount::::get(); + let validator_count = CounterForValidators::::get(); assert_eq!(validator_count, 3); - let nominator_count = CurrentNominatorsCount::::get(); + let nominator_count = CounterForNominators::::get(); assert_eq!(nominator_count, 1); // Change the maximums From 87e38e45d5c0dba66f0a4e95f4606b5179b4333f Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Thu, 17 Jun 2021 16:37:43 +0200 Subject: [PATCH 54/61] Make it possible to override maximum payload of RPC (#9019) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Make it possible to override maximum payload of RPC * Finish it. * remove todo. 
* Update client/cli/src/commands/run_cmd.rs * Apply suggestions from code review Co-authored-by: David * Apply suggestions from code review Co-authored-by: David * Incorporate suggestions * Thread rpc_max_payload from configuration to trace_block * Try obey line gitlab/check_line_width.sh * update state rpc tests * Improve readbility * Apply suggestions from code review * Apply suggestions from code review Co-authored-by: Bastian Köcher Co-authored-by: Zeke Mostov <32168567+emostov@users.noreply.github.com> Co-authored-by: David --- client/cli/src/commands/run_cmd.rs | 32 +++++++++++++++++--------- client/cli/src/config.rs | 6 +++++ client/executor/src/native_executor.rs | 2 +- client/rpc-servers/src/lib.rs | 18 +++++++++++---- client/rpc/src/state/mod.rs | 7 ++++-- client/rpc/src/state/state_full.rs | 21 +++++++++++++---- client/rpc/src/state/tests.rs | 8 +++++++ client/service/src/builder.rs | 1 + client/service/src/config.rs | 2 ++ client/service/src/lib.rs | 2 ++ client/service/test/src/lib.rs | 1 + client/tracing/src/block/mod.rs | 11 ++++++--- test-utils/test-runner/src/utils.rs | 1 + utils/browser/src/lib.rs | 1 + 14 files changed, 87 insertions(+), 26 deletions(-) diff --git a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index 3e5823ef733aa..285ffc9fdca16 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -42,12 +42,11 @@ pub struct RunCmd { /// The node will be started with the authority role and actively /// participate in any consensus task that it can (e.g. depending on /// availability of local keys). - #[structopt( - long = "validator" - )] + #[structopt(long)] pub validator: bool, - /// Disable GRANDPA voter when running in validator mode, otherwise disable the GRANDPA observer. + /// Disable GRANDPA voter when running in validator mode, otherwise disable the GRANDPA + /// observer. 
#[structopt(long)] pub no_grandpa: bool, @@ -57,8 +56,8 @@ pub struct RunCmd { /// Listen to all RPC interfaces. /// - /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC proxy - /// server to filter out dangerous methods. More details: + /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC + /// proxy server to filter out dangerous methods. More details: /// . /// Use `--unsafe-rpc-external` to suppress the warning if you understand the risks. #[structopt(long = "rpc-external")] @@ -74,8 +73,8 @@ pub struct RunCmd { /// /// - `Unsafe`: Exposes every RPC method. /// - `Safe`: Exposes only a safe subset of RPC methods, denying unsafe RPC methods. - /// - `Auto`: Acts as `Safe` if RPC is served externally, e.g. when `--{rpc,ws}-external` is passed, - /// otherwise acts as `Unsafe`. + /// - `Auto`: Acts as `Safe` if RPC is served externally, e.g. when `--{rpc,ws}-external` is + /// passed, otherwise acts as `Unsafe`. #[structopt( long, value_name = "METHOD SET", @@ -88,8 +87,9 @@ pub struct RunCmd { /// Listen to all Websocket interfaces. /// - /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC proxy - /// server to filter out dangerous methods. More details: . + /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC + /// proxy server to filter out dangerous methods. More details: + /// . /// Use `--unsafe-ws-external` to suppress the warning if you understand the risks. #[structopt(long = "ws-external")] pub ws_external: bool, @@ -100,6 +100,11 @@ pub struct RunCmd { #[structopt(long = "unsafe-ws-external")] pub unsafe_ws_external: bool, + /// Set the the maximum RPC payload size for both requests and responses (both http and ws), in + /// megabytes. Default is 15MiB. + #[structopt(long = "rpc-max-payload")] + pub rpc_max_payload: Option, + /// Listen to all Prometheus data source interfaces. 
/// /// Default is local. @@ -194,7 +199,8 @@ pub struct RunCmd { #[structopt(long, conflicts_with_all = &["alice", "charlie", "dave", "eve", "ferdie", "one", "two"])] pub bob: bool, - /// Shortcut for `--name Charlie --validator` with session keys for `Charlie` added to keystore. + /// Shortcut for `--name Charlie --validator` with session keys for `Charlie` added to + /// keystore. #[structopt(long, conflicts_with_all = &["alice", "bob", "dave", "eve", "ferdie", "one", "two"])] pub charlie: bool, @@ -435,6 +441,10 @@ impl CliConfiguration for RunCmd { Ok(self.rpc_methods.into()) } + fn rpc_max_payload(&self) -> Result> { + Ok(self.rpc_max_payload) + } + fn transaction_pool(&self) -> Result { Ok(self.pool_config.transaction_pool()) } diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index 62afc849c09fb..8e435da253c04 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -372,6 +372,11 @@ pub trait CliConfiguration: Sized { Ok(Some(Vec::new())) } + /// Get maximum RPC payload. + fn rpc_max_payload(&self) -> Result> { + Ok(None) + } + /// Get the prometheus configuration (`None` if disabled) /// /// By default this is `None`. 
@@ -535,6 +540,7 @@ pub trait CliConfiguration: Sized { rpc_ws_max_connections: self.rpc_ws_max_connections()?, rpc_http_threads: self.rpc_http_threads()?, rpc_cors: self.rpc_cors(is_dev)?, + rpc_max_payload: self.rpc_max_payload()?, prometheus_config: self.prometheus_config(DCV::prometheus_listen_port())?, telemetry_endpoints, telemetry_external_transport: self.telemetry_external_transport()?, diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs index c94088a155260..6fc34b6f1a322 100644 --- a/client/executor/src/native_executor.rs +++ b/client/executor/src/native_executor.rs @@ -291,7 +291,7 @@ impl NativeExecutor { default_heap_pages: Option, max_runtime_instances: usize, ) -> Self { - let extended = D::ExtendHostFunctions::host_functions(); + let extended = D::ExtendHostFunctions::host_functions(); let mut host_functions = sp_io::SubstrateHostFunctions::host_functions() .into_iter() // filter out any host function overrides provided. diff --git a/client/rpc-servers/src/lib.rs b/client/rpc-servers/src/lib.rs index cb2704efc82ab..c93451e5cc678 100644 --- a/client/rpc-servers/src/lib.rs +++ b/client/rpc-servers/src/lib.rs @@ -27,8 +27,10 @@ use jsonrpc_core::{IoHandlerExtension, MetaIoHandler}; use log::error; use pubsub::PubSubMetadata; +const MEGABYTE: usize = 1024 * 1024; + /// Maximal payload accepted by RPC servers. -pub const MAX_PAYLOAD: usize = 15 * 1024 * 1024; +pub const RPC_MAX_PAYLOAD_DEFAULT: usize = 15 * MEGABYTE; /// Default maximum number of connections for WS RPC servers. 
const WS_MAX_CONNECTIONS: usize = 100; @@ -85,7 +87,10 @@ mod inner { thread_pool_size: Option, cors: Option<&Vec>, io: RpcHandler, + maybe_max_payload_mb: Option, ) -> io::Result { + let max_request_body_size = maybe_max_payload_mb.map(|mb| mb.saturating_mul(MEGABYTE)) + .unwrap_or(RPC_MAX_PAYLOAD_DEFAULT); http::ServerBuilder::new(io) .threads(thread_pool_size.unwrap_or(HTTP_THREADS)) .health_api(("/health", "system_health")) @@ -96,7 +101,7 @@ mod inner { http::RestApi::Unsecure }) .cors(map_cors::(cors)) - .max_request_body_size(MAX_PAYLOAD) + .max_request_body_size(max_request_body_size) .start_http(addr) } @@ -120,14 +125,19 @@ mod inner { /// Start WS server listening on given address. /// /// **Note**: Only available if `not(target_os = "unknown")`. - pub fn start_ws>> ( + pub fn start_ws< + M: pubsub::PubSubMetadata + From>, + >( addr: &std::net::SocketAddr, max_connections: Option, cors: Option<&Vec>, io: RpcHandler, + maybe_max_payload_mb: Option, ) -> io::Result { + let rpc_max_payload = maybe_max_payload_mb.map(|mb| mb.saturating_mul(MEGABYTE)) + .unwrap_or(RPC_MAX_PAYLOAD_DEFAULT); ws::ServerBuilder::with_meta_extractor(io, |context: &ws::RequestContext| context.sender().into()) - .max_payload(MAX_PAYLOAD) + .max_payload(rpc_max_payload) .max_connections(max_connections.unwrap_or(WS_MAX_CONNECTIONS)) .allowed_origins(map_cors(cors)) .allowed_hosts(hosts_filtering(cors.is_some())) diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 803fc6797ee9a..ad9712a41db6b 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -182,6 +182,7 @@ pub fn new_full( client: Arc, subscriptions: SubscriptionManager, deny_unsafe: DenyUnsafe, + rpc_max_payload: Option, ) -> (State, ChildState) where Block: BlockT + 'static, @@ -193,9 +194,11 @@ pub fn new_full( Client::Api: Metadata, { let child_backend = Box::new( - self::state_full::FullState::new(client.clone(), subscriptions.clone()) + self::state_full::FullState::new( + 
client.clone(), subscriptions.clone(), rpc_max_payload + ) ); - let backend = Box::new(self::state_full::FullState::new(client, subscriptions)); + let backend = Box::new(self::state_full::FullState::new(client, subscriptions, rpc_max_payload)); (State { backend, deny_unsafe }, ChildState { backend: child_backend }) } diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index bea7ddfbb3b76..218cb35f0086e 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -67,7 +67,8 @@ struct QueryStorageRange { pub struct FullState { client: Arc, subscriptions: SubscriptionManager, - _phantom: PhantomData<(BE, Block)> + _phantom: PhantomData<(BE, Block)>, + rpc_max_payload: Option, } impl FullState @@ -78,8 +79,12 @@ impl FullState Block: BlockT + 'static, { /// Create new state API backend for full nodes. - pub fn new(client: Arc, subscriptions: SubscriptionManager) -> Self { - Self { client, subscriptions, _phantom: PhantomData } + pub fn new( + client: Arc, + subscriptions: SubscriptionManager, + rpc_max_payload: Option, + ) -> Self { + Self { client, subscriptions, _phantom: PhantomData, rpc_max_payload } } /// Returns given block hash or best block hash if None is passed. 
@@ -540,9 +545,15 @@ impl StateBackend for FullState, storage_keys: Option, ) -> FutureResult { + let block_executor = sc_tracing::block::BlockExecutor::new( + self.client.clone(), + block, + targets, + storage_keys, + self.rpc_max_payload, + ); Box::new(result( - sc_tracing::block::BlockExecutor::new(self.client.clone(), block, targets, storage_keys) - .trace_block() + block_executor.trace_block() .map_err(|e| invalid_block::(block, None, e.to_string())) )) } diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index cfc27c7bf525e..e413827552c9d 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -63,6 +63,7 @@ fn should_return_storage() { Arc::new(client), SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, + None, ); let key = StorageKey(KEY.to_vec()); @@ -105,6 +106,7 @@ fn should_return_child_storage() { client, SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, + None, ); let child_key = prefixed_storage_key(); let key = StorageKey(b"key".to_vec()); @@ -144,6 +146,7 @@ fn should_call_contract() { client, SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, + None, ); assert_matches!( @@ -162,6 +165,7 @@ fn should_notify_about_storage_changes() { client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, + None, ); api.subscribe_storage(Default::default(), subscriber, None.into()); @@ -200,6 +204,7 @@ fn should_send_initial_storage_changes_and_notifications() { client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, + None, ); let alice_balance_key = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Alice.into())); @@ -242,6 +247,7 @@ fn should_query_storage() { client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, + None, ); let mut add_block = |nonce| { @@ -463,6 +469,7 @@ fn should_return_runtime_version() { client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor)), 
DenyUnsafe::No, + None, ); let result = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":1,\ @@ -490,6 +497,7 @@ fn should_notify_on_runtime_version_initially() { client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, + None, ); api.subscribe_runtime_version(Default::default(), subscriber); diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index ebf600b12f020..ca22322798463 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -804,6 +804,7 @@ fn gen_handler( client.clone(), subscriptions.clone(), deny_unsafe, + config.rpc_max_payload, ); (chain, state, child_state) }; diff --git a/client/service/src/config.rs b/client/service/src/config.rs index f2c5f2c6ed407..c91cf0a4ef5c3 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -95,6 +95,8 @@ pub struct Configuration { pub rpc_cors: Option>, /// RPC methods to expose (by default only a safe subset or all of them). pub rpc_methods: RpcMethods, + /// Maximum payload of rpc request/responses. + pub rpc_max_payload: Option, /// Prometheus endpoint configuration. `None` if disabled. pub prometheus_config: Option, /// Telemetry service URL. `None` if disabled. 
diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 51ee0965ebcf4..afc1209280322 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -387,6 +387,7 @@ fn start_rpc_servers< deny_unsafe(&address, &config.rpc_methods), sc_rpc_server::RpcMiddleware::new(rpc_metrics.clone(), "http") ), + config.rpc_max_payload ), )?.map(|s| waiting::HttpServer(Some(s))), maybe_start_server( @@ -399,6 +400,7 @@ fn start_rpc_servers< deny_unsafe(&address, &config.rpc_methods), sc_rpc_server::RpcMiddleware::new(rpc_metrics.clone(), "ws") ), + config.rpc_max_payload ), )?.map(|s| waiting::WsServer(Some(s))), ))) diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 3999b852ac74c..eb437b1aba0af 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -265,6 +265,7 @@ fn node_config = Result; @@ -174,6 +175,7 @@ pub struct BlockExecutor { block: Block::Hash, targets: Option, storage_keys: Option, + rpc_max_payload: usize, } impl BlockExecutor @@ -189,8 +191,11 @@ impl BlockExecutor block: Block::Hash, targets: Option, storage_keys: Option, + rpc_max_payload: Option, ) -> Self { - Self { client, block, targets, storage_keys } + let rpc_max_payload = rpc_max_payload.map(|mb| mb.saturating_mul(MEGABYTE)) + .unwrap_or(RPC_MAX_PAYLOAD_DEFAULT); + Self { client, block, targets, storage_keys, rpc_max_payload } } /// Execute block, record all spans and events belonging to `Self::targets` @@ -260,7 +265,7 @@ impl BlockExecutor tracing::debug!(target: "state_tracing", "Captured {} spans and {} events", spans.len(), events.len()); let approx_payload_size = BASE_PAYLOAD + events.len() * AVG_EVENT + spans.len() * AVG_SPAN; - let response = if approx_payload_size > MAX_PAYLOAD { + let response = if approx_payload_size > self.rpc_max_payload { TraceBlockResponse::TraceError(TraceError { error: "Payload likely exceeds max payload size of RPC server.".to_string() diff --git a/test-utils/test-runner/src/utils.rs 
b/test-utils/test-runner/src/utils.rs index 4f5390a7eb863..fae527ededf97 100644 --- a/test-utils/test-runner/src/utils.rs +++ b/test-utils/test-runner/src/utils.rs @@ -127,6 +127,7 @@ pub fn default_config(task_executor: TaskExecutor, mut chain_spec: Box Date: Fri, 18 Jun 2021 09:24:50 +0100 Subject: [PATCH 55/61] fix build --- frame/staking/src/testing_utils.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frame/staking/src/testing_utils.rs b/frame/staking/src/testing_utils.rs index c643cb283373b..b3b8ef0843a7f 100644 --- a/frame/staking/src/testing_utils.rs +++ b/frame/staking/src/testing_utils.rs @@ -29,9 +29,9 @@ const SEED: u32 = 0; /// This function removes all validators and nominators from storage. pub fn clear_validators_and_nominators() { - Validators::::remove_all(None); + Validators::::remove_all(); CounterForValidators::::kill(); - Nominators::::remove_all(None); + Nominators::::remove_all(); CounterForNominators::::kill(); } From 182ad61f0a5eabd24def0d8e85bc2c06a19521c1 Mon Sep 17 00:00:00 2001 From: Maksim Ramanenkau Date: Mon, 26 Sep 2022 17:23:02 +0300 Subject: [PATCH 56/61] Make tests compile --- frame/chainbridge/src/mock.rs | 2 + frame/chainbridge/src/tests.rs | 52 +++++++++---------- .../src/tests/test_runtime.rs | 4 ++ frame/erc721/src/mock.rs | 2 + 4 files changed, 34 insertions(+), 26 deletions(-) diff --git a/frame/chainbridge/src/mock.rs b/frame/chainbridge/src/mock.rs index 1565c32fdcba7..761717fcf8e96 100644 --- a/frame/chainbridge/src/mock.rs +++ b/frame/chainbridge/src/mock.rs @@ -66,6 +66,8 @@ impl pallet_balances::Config for Test { type AccountStore = System; type WeightInfo = (); type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = (); } parameter_types! 
{ diff --git a/frame/chainbridge/src/tests.rs b/frame/chainbridge/src/tests.rs index a9081757e6c50..a31f0d63854ed 100644 --- a/frame/chainbridge/src/tests.rs +++ b/frame/chainbridge/src/tests.rs @@ -101,7 +101,7 @@ fn whitelist_chain() { Error::::InvalidChainId ); - assert_events(vec![Event::bridge(RawEvent::ChainWhitelisted(0))]); + assert_events(vec![Event::Bridge(RawEvent::ChainWhitelisted(0))]); }) } @@ -117,8 +117,8 @@ fn set_get_threshold() { assert_eq!(::get(), 5); assert_events(vec![ - Event::bridge(RawEvent::RelayerThresholdChanged(TEST_THRESHOLD)), - Event::bridge(RawEvent::RelayerThresholdChanged(5)), + Event::Bridge(RawEvent::RelayerThresholdChanged(TEST_THRESHOLD)), + Event::Bridge(RawEvent::RelayerThresholdChanged(5)), ]); }) } @@ -145,8 +145,8 @@ fn asset_transfer_success() { amount.into() )); assert_events(vec![ - Event::bridge(RawEvent::ChainWhitelisted(dest_id.clone())), - Event::bridge(RawEvent::FungibleTransfer( + Event::Bridge(RawEvent::ChainWhitelisted(dest_id.clone())), + Event::Bridge(RawEvent::FungibleTransfer( dest_id.clone(), 1, resource_id.clone(), @@ -162,7 +162,7 @@ fn asset_transfer_success() { to.clone(), metadata.clone() )); - assert_events(vec![Event::bridge(RawEvent::NonFungibleTransfer( + assert_events(vec![Event::Bridge(RawEvent::NonFungibleTransfer( dest_id.clone(), 2, resource_id.clone(), @@ -176,7 +176,7 @@ fn asset_transfer_success() { resource_id.clone(), metadata.clone() )); - assert_events(vec![Event::bridge(RawEvent::GenericTransfer( + assert_events(vec![Event::Bridge(RawEvent::GenericTransfer( dest_id.clone(), 3, resource_id, @@ -221,7 +221,7 @@ fn asset_transfer_invalid_chain() { let resource_id = [4; 32]; assert_ok!(Bridge::whitelist_chain(Origin::root(), chain_id.clone())); - assert_events(vec![Event::bridge(RawEvent::ChainWhitelisted( + assert_events(vec![Event::Bridge(RawEvent::ChainWhitelisted( chain_id.clone(), ))]); @@ -269,10 +269,10 @@ fn add_remove_relayer() { assert_eq!(Bridge::relayer_count(), 2); 
assert_events(vec![ - Event::bridge(RawEvent::RelayerAdded(RELAYER_A)), - Event::bridge(RawEvent::RelayerAdded(RELAYER_B)), - Event::bridge(RawEvent::RelayerAdded(RELAYER_C)), - Event::bridge(RawEvent::RelayerRemoved(RELAYER_B)), + Event::Bridge(RawEvent::RelayerAdded(RELAYER_A)), + Event::Bridge(RawEvent::RelayerAdded(RELAYER_B)), + Event::Bridge(RawEvent::RelayerAdded(RELAYER_C)), + Event::Bridge(RawEvent::RelayerRemoved(RELAYER_B)), ]); }) } @@ -342,11 +342,11 @@ fn create_sucessful_proposal() { assert_eq!(prop, expected); assert_events(vec![ - Event::bridge(RawEvent::VoteFor(src_id, prop_id, RELAYER_A)), - Event::bridge(RawEvent::VoteAgainst(src_id, prop_id, RELAYER_B)), - Event::bridge(RawEvent::VoteFor(src_id, prop_id, RELAYER_C)), - Event::bridge(RawEvent::ProposalApproved(src_id, prop_id)), - Event::bridge(RawEvent::ProposalSucceeded(src_id, prop_id)), + Event::Bridge(RawEvent::VoteFor(src_id, prop_id, RELAYER_A)), + Event::Bridge(RawEvent::VoteAgainst(src_id, prop_id, RELAYER_B)), + Event::Bridge(RawEvent::VoteFor(src_id, prop_id, RELAYER_C)), + Event::Bridge(RawEvent::ProposalApproved(src_id, prop_id)), + Event::Bridge(RawEvent::ProposalSucceeded(src_id, prop_id)), ]); }) } @@ -418,10 +418,10 @@ fn create_unsucessful_proposal() { ); assert_events(vec![ - Event::bridge(RawEvent::VoteFor(src_id, prop_id, RELAYER_A)), - Event::bridge(RawEvent::VoteAgainst(src_id, prop_id, RELAYER_B)), - Event::bridge(RawEvent::VoteAgainst(src_id, prop_id, RELAYER_C)), - Event::bridge(RawEvent::ProposalRejected(src_id, prop_id)), + Event::Bridge(RawEvent::VoteFor(src_id, prop_id, RELAYER_A)), + Event::Bridge(RawEvent::VoteAgainst(src_id, prop_id, RELAYER_B)), + Event::Bridge(RawEvent::VoteAgainst(src_id, prop_id, RELAYER_C)), + Event::Bridge(RawEvent::ProposalRejected(src_id, prop_id)), ]); }) } @@ -479,10 +479,10 @@ fn execute_after_threshold_change() { ); assert_events(vec![ - Event::bridge(RawEvent::VoteFor(src_id, prop_id, RELAYER_A)), - 
Event::bridge(RawEvent::RelayerThresholdChanged(1)), - Event::bridge(RawEvent::ProposalApproved(src_id, prop_id)), - Event::bridge(RawEvent::ProposalSucceeded(src_id, prop_id)), + Event::Bridge(RawEvent::VoteFor(src_id, prop_id, RELAYER_A)), + Event::Bridge(RawEvent::RelayerThresholdChanged(1)), + Event::Bridge(RawEvent::ProposalApproved(src_id, prop_id)), + Event::Bridge(RawEvent::ProposalSucceeded(src_id, prop_id)), ]); }) } @@ -557,7 +557,7 @@ fn proposal_expires() { }; assert_eq!(prop, expected); - assert_events(vec![Event::bridge(RawEvent::VoteFor( + assert_events(vec![Event::Bridge(RawEvent::VoteFor( src_id, prop_id, RELAYER_A, ))]); }) diff --git a/frame/ddc-metrics-offchain-worker/src/tests/test_runtime.rs b/frame/ddc-metrics-offchain-worker/src/tests/test_runtime.rs index 3ee6b65f9e658..6ac3757a61ce9 100644 --- a/frame/ddc-metrics-offchain-worker/src/tests/test_runtime.rs +++ b/frame/ddc-metrics-offchain-worker/src/tests/test_runtime.rs @@ -102,6 +102,8 @@ impl pallet_balances::Config for Test { type AccountStore = System; type WeightInfo = (); type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = (); } thread_local! { @@ -193,7 +195,9 @@ where { type OverarchingCall = Call; type Extrinsic = Extrinsic; + } +impl pallet_randomness_collective_flip::Config for Test {} impl CreateSignedTransaction for Test where diff --git a/frame/erc721/src/mock.rs b/frame/erc721/src/mock.rs index d23ce61f37fb6..519c66b4a8a39 100644 --- a/frame/erc721/src/mock.rs +++ b/frame/erc721/src/mock.rs @@ -63,6 +63,8 @@ impl pallet_balances::Config for Test { type AccountStore = System; type WeightInfo = (); type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = (); } parameter_types! 
{ From 5f5ee1c6ff0e92c4adec6c298a20e9d4b21b1208 Mon Sep 17 00:00:00 2001 From: Maksim Ramanenkau Date: Mon, 26 Sep 2022 18:11:42 +0300 Subject: [PATCH 57/61] Add release notes and increment spec version --- CHANGELOG.md | 4 ++++ Cargo.lock | 8 ++++---- bin/node/runtime/src/lib.rs | 2 +- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 35b99df75883a..dd1e7a8d4680f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,10 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [2.21.0] +### Changed +- Updated Substrate to polkadot-v0.9.5 + ## [2.20.0] ### Changed - Updated Substrate to polkadot-v0.9.4 diff --git a/Cargo.lock b/Cargo.lock index 6ac7983b97762..35ae761cbcfb5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3014,7 +3014,7 @@ dependencies = [ "hyper 0.14.5", "log", "serde", - "serde_json", + "serde_json 1.0.64", "soketto 0.5.0", "thiserror", ] @@ -3034,7 +3034,7 @@ dependencies = [ "rustls 0.19.1", "rustls-native-certs 0.5.0", "serde", - "serde_json", + "serde_json 1.0.64", "soketto 0.5.0", "thiserror", "tokio 0.2.25", @@ -4239,7 +4239,7 @@ dependencies = [ "sc-tracing", "sc-transaction-pool", "serde", - "serde_json", + "serde_json 1.0.64", "soketto 0.4.2", "sp-authority-discovery", "sp-authorship", @@ -6783,7 +6783,7 @@ dependencies = [ "pallet-elections-phragmen", "parity-scale-codec", "serde", - "serde_json", + "serde_json 1.0.64", "sp-core", "sp-io", "sp-runtime", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index e7ed4ecd51226..af1bb7b0024b7 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -120,7 +120,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. 
If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 290, + spec_version: 291, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 2, From 48d90a8089f1977c89c3eae39175162e7e721ca4 Mon Sep 17 00:00:00 2001 From: Maksim R Date: Mon, 26 Sep 2022 21:01:49 +0300 Subject: [PATCH 58/61] Post merge fixes --- bin/node/cli/src/chain_spec.rs | 2 +- bin/node/testing/src/genesis.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs index c90b7d227a787..cad015cc70f9c 100644 --- a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -333,7 +333,7 @@ pub fn testnet_genesis( pot: 0, max_members: 999, }, - pallet_vesting: Default::default(), + vesting: Default::default(), transaction_storage: Default::default(), } } diff --git a/bin/node/testing/src/genesis.rs b/bin/node/testing/src/genesis.rs index 9d958718f4f92..03009598e87ac 100644 --- a/bin/node/testing/src/genesis.rs +++ b/bin/node/testing/src/genesis.rs @@ -118,7 +118,7 @@ pub fn config_endowed( pot: 0, max_members: 999, }, - pallet_vesting: Default::default(), + vesting: Default::default(), transaction_storage: Default::default(), } } From 5b692a4bea1ade6d314065d74ba57b4b5c0c93a2 Mon Sep 17 00:00:00 2001 From: Maksim Ramanenkau Date: Tue, 27 Sep 2022 13:03:09 +0300 Subject: [PATCH 59/61] Fix diffs with substrate --- Cargo.lock | 23 ----------------------- client/executor/Cargo.toml | 2 +- client/network/src/protocol/sync.rs | 4 ---- client/rpc-servers/src/lib.rs | 2 +- primitives/npos-elections/src/phragmms.rs | 2 +- 5 files changed, 3 insertions(+), 30 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 35ae761cbcfb5..308ed172b2213 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7496,7 +7496,6 @@ dependencies = [ "sp-version", "sp-wasm-interface", "substrate-test-runtime", - "test-case", "tracing", "tracing-subscriber", 
"wasmi", @@ -9957,28 +9956,6 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "test-case" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07aea929e9488998b64adc414c29fe5620398f01c2e3f58164122b17e567a6d5" -dependencies = [ - "test-case-macros", -] - -[[package]] -name = "test-case-macros" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c95968eedc6fc4f5c21920e0f4264f78ec5e4c56bb394f319becc1a5830b3e54" -dependencies = [ - "cfg-if 1.0.0", - "proc-macro-error", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "test-runner" version = "0.9.0" diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index a692e860f728a..26bb0c4041222 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -47,7 +47,7 @@ sp-state-machine = { version = "0.9.0", path = "../../primitives/state-machine" sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } sp-tracing = { version = "3.0.0", path = "../../primitives/tracing" } sc-tracing = { version = "3.0.0", path = "../tracing" } -test-case = "2.2.1" + tracing = "0.1.25" tracing-subscriber = "0.2.18" paste = "1.0" diff --git a/client/network/src/protocol/sync.rs b/client/network/src/protocol/sync.rs index f10c24ccc20ed..7b7ac721b5b47 100644 --- a/client/network/src/protocol/sync.rs +++ b/client/network/src/protocol/sync.rs @@ -1145,10 +1145,6 @@ impl ChainSync { if let Some(peer) = who.and_then(|p| self.peers.get_mut(&p)) { peer.update_common_number(number); } - - if let Some(peer) = who.and_then(|p| self.peers.get_mut(&p)) { - peer.update_common_number(number); - } }, Err(BlockImportError::IncompleteHeader(who)) => { if let Some(peer) = who { diff --git a/client/rpc-servers/src/lib.rs b/client/rpc-servers/src/lib.rs index 984b69156b831..c93451e5cc678 100644 --- a/client/rpc-servers/src/lib.rs +++ b/client/rpc-servers/src/lib.rs @@ -30,7 +30,7 @@ use pubsub::PubSubMetadata; 
const MEGABYTE: usize = 1024 * 1024; /// Maximal payload accepted by RPC servers. -pub const RPC_MAX_PAYLOAD_DEFAULT: usize = 150 * MEGABYTE; +pub const RPC_MAX_PAYLOAD_DEFAULT: usize = 15 * MEGABYTE; /// Default maximum number of connections for WS RPC servers. const WS_MAX_CONNECTIONS: usize = 100; diff --git a/primitives/npos-elections/src/phragmms.rs b/primitives/npos-elections/src/phragmms.rs index 1dd2d63e59a02..9b464443917f5 100644 --- a/primitives/npos-elections/src/phragmms.rs +++ b/primitives/npos-elections/src/phragmms.rs @@ -249,7 +249,7 @@ mod tests { assert_eq!( voters.iter().find(|x| x.who == 30).map(|v| ( v.who, - v.edges.iter().map(|e| (e.who, e.weight)).collect::>() + v.edges.iter().map(|e| (e.who, e.weight)).collect::>() )).unwrap(), (30, vec![(2, 0), (3, 30)]), ); From 25c88f913706e5497af9c32dd4e641c2114e1d6d Mon Sep 17 00:00:00 2001 From: Maksim Ramanenkau Date: Tue, 27 Sep 2022 13:35:29 +0300 Subject: [PATCH 60/61] Remove pallet-transaction-storage --- Cargo.lock | 1 - bin/node/cli/src/chain_spec.rs | 1 - bin/node/runtime/Cargo.toml | 3 --- bin/node/runtime/src/lib.rs | 10 ---------- bin/node/testing/src/genesis.rs | 1 - 5 files changed, 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 308ed172b2213..29a9d256654f0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4429,7 +4429,6 @@ dependencies = [ "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", - "pallet-transaction-storage", "pallet-treasury", "pallet-utility", "pallet-vesting", diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs index cad015cc70f9c..020982ecbed23 100644 --- a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -334,7 +334,6 @@ pub fn testnet_genesis( max_members: 999, }, vesting: Default::default(), - transaction_storage: Default::default(), } } diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 0bbc166044f42..3aa03a399911a 100644 --- 
a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -83,7 +83,6 @@ pallet-treasury = { version = "3.0.0", default-features = false, path = "../../. pallet-utility = { version = "3.0.0", default-features = false, path = "../../../frame/utility" } pallet-transaction-payment = { version = "3.0.0", default-features = false, path = "../../../frame/transaction-payment" } pallet-transaction-payment-rpc-runtime-api = { version = "3.0.0", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } -pallet-transaction-storage = { version = "3.0.0", default-features = false, path = "../../../frame/transaction-storage" } pallet-vesting = { version = "3.0.0", default-features = false, path = "../../../frame/vesting" } pallet-chainbridge = { version = "2.0.0", default-features = false, path = "../../../frame/chainbridge" } pallet-cere-ddc = { version = "5.0.0", default-features = false, path = "../../../frame/ddc-pallet" } @@ -153,7 +152,6 @@ std = [ "pallet-tips/std", "pallet-transaction-payment-rpc-runtime-api/std", "pallet-transaction-payment/std", - "pallet-transaction-storage/std", "pallet-treasury/std", "sp-transaction-pool/std", "pallet-utility/std", @@ -192,7 +190,6 @@ runtime-benchmarks = [ "pallet-staking/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-tips/runtime-benchmarks", - "pallet-transaction-storage/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index af1bb7b0024b7..0ed5825a3b7c4 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1086,14 +1086,6 @@ impl pallet_ddc_metrics_offchain_worker::Config for Runtime { type Call = Call; } -impl pallet_transaction_storage::Config for Runtime { - type Event = Event; - type Currency = Balances; - type Call = Call; - type FeeDestination = (); - type WeightInfo = 
pallet_transaction_storage::weights::SubstrateWeight; -} - construct_runtime!( pub enum Runtime where Block = Block, @@ -1134,7 +1126,6 @@ construct_runtime!( Multisig: pallet_multisig::{Pallet, Call, Storage, Event}, Bounties: pallet_bounties::{Pallet, Call, Storage, Event}, Tips: pallet_tips::{Pallet, Call, Storage, Event}, - TransactionStorage: pallet_transaction_storage::{Pallet, Call, Storage, Inherent, Config, Event}, CereDDCModule: pallet_cere_ddc::{Pallet, Call, Storage, Event}, ChainBridge: pallet_chainbridge::{Pallet, Call, Storage, Event}, Erc721: pallet_erc721::{Pallet, Call, Storage, Event}, @@ -1464,7 +1455,6 @@ impl_runtime_apis! { add_benchmark!(params, batches, frame_system, SystemBench::); add_benchmark!(params, batches, pallet_timestamp, Timestamp); add_benchmark!(params, batches, pallet_tips, Tips); - add_benchmark!(params, batches, pallet_transaction_storage, TransactionStorage); add_benchmark!(params, batches, pallet_treasury, Treasury); add_benchmark!(params, batches, pallet_utility, Utility); add_benchmark!(params, batches, pallet_vesting, Vesting); diff --git a/bin/node/testing/src/genesis.rs b/bin/node/testing/src/genesis.rs index 03009598e87ac..b11492a3b734f 100644 --- a/bin/node/testing/src/genesis.rs +++ b/bin/node/testing/src/genesis.rs @@ -119,6 +119,5 @@ pub fn config_endowed( max_members: 999, }, vesting: Default::default(), - transaction_storage: Default::default(), } } From 23161c74911810341aa76683ad90fda201813b8d Mon Sep 17 00:00:00 2001 From: Maksim Ramanenkau Date: Tue, 22 Nov 2022 16:21:31 +0300 Subject: [PATCH 61/61] Add storage migrations --- bin/node/runtime/src/lib.rs | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 0ed5825a3b7c4..b77bd79032165 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1171,9 +1171,34 @@ pub type Executive = frame_executive::Executive< frame_system::ChainContext, 
Runtime, AllPallets, - (), + GrandpaStoragePrefixMigration, >; +pub struct GrandpaStoragePrefixMigration; +impl frame_support::traits::OnRuntimeUpgrade for GrandpaStoragePrefixMigration { + fn on_runtime_upgrade() -> frame_support::weights::Weight { + use frame_support::traits::PalletInfo; + let name = ::PalletInfo::name::() + .expect("grandpa is part of pallets in construct_runtime, so it has a name; qed"); + pallet_grandpa::migrations::v3_1::migrate::(name) + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<(), &'static str> { + use frame_support::traits::PalletInfo; + let name = ::PalletInfo::name::() + .expect("grandpa is part of pallets in construct_runtime, so it has a name; qed"); + pallet_grandpa::migrations::v3_1::pre_migration::(name); + Ok(()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade() -> Result<(), &'static str> { + pallet_grandpa::migrations::v3_1::post_migration::(); + Ok(()) + } +} + impl_runtime_apis! { impl sp_api::Core for Runtime { fn version() -> RuntimeVersion {