From d86bd7981e67829c60a59b1c4c6a61f6341b4579 Mon Sep 17 00:00:00 2001 From: Dmitry Sinyavin Date: Sat, 6 May 2023 14:40:29 +0200 Subject: [PATCH 01/35] Implement wrapper allocator -- draft --- Cargo.lock | 9 ++++ Cargo.toml | 3 ++ node/core/pvf/worker/Cargo.toml | 2 + node/core/pvf/worker/src/prepare.rs | 16 ++++++ node/wrapper-allocator/Cargo.toml | 9 ++++ node/wrapper-allocator/src/lib.rs | 79 +++++++++++++++++++++++++++++ src/main.rs | 2 +- 7 files changed, 119 insertions(+), 1 deletion(-) create mode 100644 node/wrapper-allocator/Cargo.toml create mode 100644 node/wrapper-allocator/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index bf213e8ef79c..0c929ebb59b0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6723,6 +6723,7 @@ dependencies = [ "tempfile", "tikv-jemallocator", "tokio", + "wrapper-allocator", ] [[package]] @@ -7387,6 +7388,7 @@ dependencies = [ "tikv-jemalloc-ctl", "tokio", "tracing-gum", + "wrapper-allocator", ] [[package]] @@ -14220,6 +14222,13 @@ dependencies = [ "winapi", ] +[[package]] +name = "wrapper-allocator" +version = "0.9.41" +dependencies = [ + "tikv-jemallocator", +] + [[package]] name = "wyz" version = "0.5.1" diff --git a/Cargo.toml b/Cargo.toml index 886b489a5024..0a51bbf39170 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,6 +26,7 @@ tikv-jemallocator = "0.5.0" polkadot-cli = { path = "cli", features = [ "kusama-native", "westend-native", "rococo-native" ] } polkadot-node-core-pvf-worker = { path = "node/core/pvf/worker" } polkadot-overseer = { path = "node/overseer" } +wrapper-allocator = { path = "node/wrapper-allocator", optional = true } [dev-dependencies] assert_cmd = "2.0.4" @@ -101,6 +102,7 @@ members = [ "node/subsystem-types", "node/subsystem-test-helpers", "node/subsystem-util", + "node/wrapper-allocator", "node/jaeger", "node/gum", "node/gum/proc-macro", @@ -208,6 +210,7 @@ fast-runtime = [ "polkadot-cli/fast-runtime" ] runtime-metrics = [ "polkadot-cli/runtime-metrics" ] pyroscope = ["polkadot-cli/pyroscope"] 
jemalloc-allocator = ["polkadot-node-core-pvf-worker/jemalloc-allocator", "polkadot-overseer/jemalloc-allocator"] +wrapper-allocator = ["jemalloc-allocator", "dep:wrapper-allocator", "polkadot-node-core-pvf-worker/wrapper-allocator"] # Configuration for building a .deb package - for use with `cargo-deb` [package.metadata.deb] diff --git a/node/core/pvf/worker/Cargo.toml b/node/core/pvf/worker/Cargo.toml index 260c6217eb67..3ff9815c412a 100644 --- a/node/core/pvf/worker/Cargo.toml +++ b/node/core/pvf/worker/Cargo.toml @@ -18,6 +18,7 @@ rayon = "1.5.1" tempfile = "3.3.0" tikv-jemalloc-ctl = { version = "0.5.0", optional = true } tokio = "1.24.2" +wrapper-allocator = { path = "../../../wrapper-allocator", optional = true } parity-scale-codec = { version = "3.4.0", default-features = false, features = ["derive"] } @@ -47,3 +48,4 @@ tempfile = "3.3.0" [features] jemalloc-allocator = ["dep:tikv-jemalloc-ctl"] +wrapper-allocator = ["dep:wrapper-allocator"] diff --git a/node/core/pvf/worker/src/prepare.rs b/node/core/pvf/worker/src/prepare.rs index 3cec7439f8df..4e3309bb7795 100644 --- a/node/core/pvf/worker/src/prepare.rs +++ b/node/core/pvf/worker/src/prepare.rs @@ -18,6 +18,8 @@ use crate::memory_stats::max_rss_stat::{extract_max_rss_stat, get_max_rss_thread}; #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] use crate::memory_stats::memory_tracker::{get_memory_tracker_loop_stats, memory_tracker_loop}; +#[cfg(feature = "wrapper-allocator")] +use wrapper_allocator::ALLOCATOR_DATA; use crate::{ common::{bytes_to_path, cpu_time_monitor_loop, worker_event_loop}, prepare, prevalidate, LOG_TARGET, @@ -109,8 +111,22 @@ pub fn worker_entrypoint(socket_path: &str, node_version: Option<&str>) { // Spawn another thread for preparation. 
let prepare_fut = rt_handle .spawn_blocking(move || { + #[cfg(feature = "wrapper-allocator")] + ALLOCATOR_DATA.checkpoint(); + let result = prepare_artifact(pvf); + #[cfg(feature = "wrapper-allocator")] + { + let peak = ALLOCATOR_DATA.checkpoint(); + gum::debug!( + target: LOG_TARGET, + %worker_pid, + "prepare job peak allocation is {} bytes", + peak, + ); + } + // Get the `ru_maxrss` stat. If supported, call getrusage for the thread. #[cfg(target_os = "linux")] let result = result.map(|artifact| (artifact, get_max_rss_thread())); diff --git a/node/wrapper-allocator/Cargo.toml b/node/wrapper-allocator/Cargo.toml new file mode 100644 index 000000000000..fca867934e3d --- /dev/null +++ b/node/wrapper-allocator/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "wrapper-allocator" +description = "Wrapper allocator to control amount of memory consumed by PVF preparation process" +version.workspace = true +authors.workspace = true +edition.workspace = true + +[dependencies] +tikv-jemallocator = "0.5.0" diff --git a/node/wrapper-allocator/src/lib.rs b/node/wrapper-allocator/src/lib.rs new file mode 100644 index 000000000000..f21e9dc03711 --- /dev/null +++ b/node/wrapper-allocator/src/lib.rs @@ -0,0 +1,79 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
+ +use core::sync::atomic::{ Ordering::SeqCst, AtomicUsize }; +use core::alloc::{GlobalAlloc, Layout}; +use tikv_jemallocator::Jemalloc; + +/// +pub struct WrapperAllocatorData { + allocated: AtomicUsize, + checkpoint: AtomicUsize, + peak: AtomicUsize, + // limit: AtomicUsize, // Should we introduce a checkpoint limit and fail allocation if the limit is hit? +} + +impl WrapperAllocatorData { + /// Marks a new checkpoint. Returns peak allocation, in bytes, since the last checkpoint. + pub fn checkpoint(&self) -> usize { + let alloc = ALLOCATOR_DATA.allocated.load(SeqCst); + let old_cp = ALLOCATOR_DATA.checkpoint.swap(alloc, SeqCst); + ALLOCATOR_DATA.peak.swap(alloc, SeqCst).saturating_sub(old_cp) + } +} + +pub static ALLOCATOR_DATA: WrapperAllocatorData = WrapperAllocatorData { allocated: AtomicUsize::new(0), checkpoint: AtomicUsize::new(0), peak: AtomicUsize::new(0) }; + +struct WrapperAllocator(A); + +unsafe impl GlobalAlloc for WrapperAllocator { + + // SAFETY: The wrapped methods are as safe as the underlying allocator implementation is. 
+ + #[inline] + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + let old_alloc = ALLOCATOR_DATA.allocated.fetch_add(layout.size(), SeqCst); + ALLOCATOR_DATA.peak.fetch_max(old_alloc + layout.size(), SeqCst); + self.0.alloc(layout) + } + + #[inline] + unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { + let old_alloc = ALLOCATOR_DATA.allocated.fetch_add(layout.size(), SeqCst); + ALLOCATOR_DATA.peak.fetch_max(old_alloc + layout.size(), SeqCst); + self.0.alloc_zeroed(layout) + } + + #[inline] + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) -> () { + ALLOCATOR_DATA.allocated.fetch_sub(layout.size(), SeqCst); + self.0.dealloc(ptr, layout) + } + + #[inline] + unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { + if new_size > layout.size() { + let old_alloc = ALLOCATOR_DATA.allocated.fetch_add(new_size - layout.size(), SeqCst); + ALLOCATOR_DATA.peak.fetch_max(old_alloc + new_size - layout.size(), SeqCst); + } else { + ALLOCATOR_DATA.allocated.fetch_sub(layout.size() - new_size, SeqCst); + } + self.0.realloc(ptr, layout, new_size) + } +} + +#[global_allocator] +static ALLOC: WrapperAllocator = WrapperAllocator(Jemalloc); diff --git a/src/main.rs b/src/main.rs index 5986d8cea7bb..ba4355b2d027 100644 --- a/src/main.rs +++ b/src/main.rs @@ -22,7 +22,7 @@ use color_eyre::eyre; /// Global allocator. Changing it to another allocator will require changing /// `memory_stats::MemoryAllocationTracker`. 
-#[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] +#[cfg(all(any(target_os = "linux", feature = "jemalloc-allocator"), not(feature = "wrapper-allocator")))] #[global_allocator] pub static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; From 998a6fff3e001531194089c9c19e0bf8bde338ba Mon Sep 17 00:00:00 2001 From: Dmitry Sinyavin Date: Tue, 9 May 2023 15:32:27 +0200 Subject: [PATCH 02/35] Minor fixes --- node/wrapper-allocator/src/lib.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/node/wrapper-allocator/src/lib.rs b/node/wrapper-allocator/src/lib.rs index f21e9dc03711..891c6947e967 100644 --- a/node/wrapper-allocator/src/lib.rs +++ b/node/wrapper-allocator/src/lib.rs @@ -18,7 +18,6 @@ use core::sync::atomic::{ Ordering::SeqCst, AtomicUsize }; use core::alloc::{GlobalAlloc, Layout}; use tikv_jemallocator::Jemalloc; -/// pub struct WrapperAllocatorData { allocated: AtomicUsize, checkpoint: AtomicUsize, @@ -29,9 +28,9 @@ pub struct WrapperAllocatorData { impl WrapperAllocatorData { /// Marks a new checkpoint. Returns peak allocation, in bytes, since the last checkpoint. 
pub fn checkpoint(&self) -> usize { - let alloc = ALLOCATOR_DATA.allocated.load(SeqCst); - let old_cp = ALLOCATOR_DATA.checkpoint.swap(alloc, SeqCst); - ALLOCATOR_DATA.peak.swap(alloc, SeqCst).saturating_sub(old_cp) + let allocated = ALLOCATOR_DATA.allocated.load(SeqCst); + let old_cp = ALLOCATOR_DATA.checkpoint.swap(allocated, SeqCst); + ALLOCATOR_DATA.peak.swap(allocated, SeqCst).saturating_sub(old_cp) } } From dbd40a735809619a340a3027c4c6350155bccd34 Mon Sep 17 00:00:00 2001 From: Dmitry Sinyavin Date: Thu, 18 May 2023 15:58:36 +0200 Subject: [PATCH 03/35] Backlog tracking allocator --- node/core/pvf/worker/src/prepare.rs | 11 +- node/wrapper-allocator/src/lib.rs | 167 ++++++++++++++++++++-------- src/main.rs | 5 +- 3 files changed, 130 insertions(+), 53 deletions(-) diff --git a/node/core/pvf/worker/src/prepare.rs b/node/core/pvf/worker/src/prepare.rs index 4e3309bb7795..82836def37df 100644 --- a/node/core/pvf/worker/src/prepare.rs +++ b/node/core/pvf/worker/src/prepare.rs @@ -18,8 +18,6 @@ use crate::memory_stats::max_rss_stat::{extract_max_rss_stat, get_max_rss_thread}; #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] use crate::memory_stats::memory_tracker::{get_memory_tracker_loop_stats, memory_tracker_loop}; -#[cfg(feature = "wrapper-allocator")] -use wrapper_allocator::ALLOCATOR_DATA; use crate::{ common::{bytes_to_path, cpu_time_monitor_loop, worker_event_loop}, prepare, prevalidate, LOG_TARGET, @@ -33,6 +31,8 @@ use polkadot_node_core_pvf::{ }; use std::{any::Any, panic, path::PathBuf, sync::mpsc::channel}; use tokio::{io, net::UnixStream}; +#[cfg(feature = "wrapper-allocator")] +use wrapper_allocator::ALLOC; async fn recv_request(stream: &mut UnixStream) -> io::Result<(PvfPrepData, PathBuf)> { let pvf = framed_recv(stream).await?; @@ -112,18 +112,19 @@ pub fn worker_entrypoint(socket_path: &str, node_version: Option<&str>) { let prepare_fut = rt_handle .spawn_blocking(move || { #[cfg(feature = "wrapper-allocator")] - 
ALLOCATOR_DATA.checkpoint(); + ALLOC.start_tracking(100_000_000); let result = prepare_artifact(pvf); #[cfg(feature = "wrapper-allocator")] { - let peak = ALLOCATOR_DATA.checkpoint(); + let (events, peak) = ALLOC.end_tracking(); gum::debug!( target: LOG_TARGET, %worker_pid, - "prepare job peak allocation is {} bytes", + "prepare job peak allocation is {} bytes in {} events", peak, + events, ); } diff --git a/node/wrapper-allocator/src/lib.rs b/node/wrapper-allocator/src/lib.rs index 891c6947e967..e9da9a975c48 100644 --- a/node/wrapper-allocator/src/lib.rs +++ b/node/wrapper-allocator/src/lib.rs @@ -14,65 +14,138 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use core::sync::atomic::{ Ordering::SeqCst, AtomicUsize }; +//! Tracking global allocator. Initially just forwards allocation and deallocation requests +//! to the underlying allocator. When tracking is enabled, stores every allocation event into +//! pre-allocated backlog. When tracking mode is disables, replays the backlog and counts the +//! number of allocation events and the peak allocation value. + use core::alloc::{GlobalAlloc, Layout}; +use std::sync::{ + atomic::{AtomicUsize, Ordering::Relaxed}, + RwLock, +}; use tikv_jemallocator::Jemalloc; -pub struct WrapperAllocatorData { - allocated: AtomicUsize, - checkpoint: AtomicUsize, - peak: AtomicUsize, - // limit: AtomicUsize, // Should we introduce a checkpoint limit and fail allocation if the limit is hit? +struct WrapperAllocatorData { + tracking: RwLock, + backlog: Vec, + backlog_index: AtomicUsize, } impl WrapperAllocatorData { - /// Marks a new checkpoint. Returns peak allocation, in bytes, since the last checkpoint. 
- pub fn checkpoint(&self) -> usize { - let allocated = ALLOCATOR_DATA.allocated.load(SeqCst); - let old_cp = ALLOCATOR_DATA.checkpoint.swap(allocated, SeqCst); - ALLOCATOR_DATA.peak.swap(allocated, SeqCst).saturating_sub(old_cp) - } + // SAFETY: + // * Tracking must only be performed by a single thread at a time + // * `start_tracking` and `stop_tracking` must be called from the same thread + // * Tracking periods must not overlap + // * Caller must provide sufficient backlog size + + unsafe fn start_tracking(&mut self, backlog_size: usize) { + // Allocate the backlog before locking anything. The allocation won't be available later. + let backlog = Vec::with_capacity(backlog_size); + // Lock allocations, move the allocated vector to our place and start tracking. + let mut tracking = self.tracking.write().unwrap(); + assert!(!*tracking); // Shouldn't start tracking if already tracking + self.backlog = backlog; + self.backlog.resize(backlog_size, 0); + self.backlog_index.store(0, Relaxed); + *tracking = true; + } + + unsafe fn end_tracking(&mut self) -> (usize, isize) { + let mut tracking = self.tracking.write().unwrap(); + assert!(*tracking); // Start/end calls must be consistent + + // At this point, all the allocation is blocked as all the threads are waiting for + // read lock on `tracking`. The following code replays the backlog and calulates the + // peak value. It must not perform any allocation, otherwise a deadlock will occur. 
+ let mut peak = 0; + let mut alloc = 0; + let mut events = 0usize; + for i in 0..self.backlog.len() { + if self.backlog[i] == 0 { + break + } + events += 1; + alloc += self.backlog[i]; + if alloc > peak { + peak = alloc + } + } + *tracking = false; + (events, peak) + } + + #[inline] + unsafe fn track(&mut self, alloc: isize) { + let tracking = self.tracking.read().unwrap(); + if !*tracking { + return + } + let i = self.backlog_index.fetch_add(1, Relaxed); + if i == self.backlog.len() { + // We cannot use formatted text here as it would result in allocations and a deadlock + panic!("Backlog size provided was not enough for allocation tracking"); + } + // It is safe as the vector is pre-allocated and the index is acquired atomically + self.backlog[i] = alloc; + } } -pub static ALLOCATOR_DATA: WrapperAllocatorData = WrapperAllocatorData { allocated: AtomicUsize::new(0), checkpoint: AtomicUsize::new(0), peak: AtomicUsize::new(0) }; +static mut ALLOCATOR_DATA: WrapperAllocatorData = WrapperAllocatorData { + tracking: RwLock::new(false), + backlog: vec![], + backlog_index: AtomicUsize::new(0), +}; + +pub struct WrapperAllocator(A); -struct WrapperAllocator(A); +impl WrapperAllocator { + /// Start tracking with the given backlog size (in allocation events). Providing insufficient + /// backlog size will result in a panic. + pub fn start_tracking(&self, backlog_size: usize) { + unsafe { + ALLOCATOR_DATA.start_tracking(backlog_size); + } + } + + /// End tracking and return number of allocation events (as `usize`) and peak allocation + /// value in bytes (as `isize`). Peak allocation value is not guaranteed to be neither + /// non-zero nor positive. 
+ pub fn end_tracking(&self) -> (usize, isize) { + unsafe { ALLOCATOR_DATA.end_tracking() } + } +} unsafe impl GlobalAlloc for WrapperAllocator { + // SAFETY: + // * The wrapped methods are as safe as the underlying allocator implementation is + // * In tracking mode, it is safe as long as a sufficient backlog size is provided when + // entering the tracking mode + + #[inline] + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + ALLOCATOR_DATA.track(layout.size() as isize); + self.0.alloc(layout) + } + + #[inline] + unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { + ALLOCATOR_DATA.track(layout.size() as isize); + self.0.alloc_zeroed(layout) + } + + #[inline] + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) -> () { + ALLOCATOR_DATA.track(-(layout.size() as isize)); + self.0.dealloc(ptr, layout) + } - // SAFETY: The wrapped methods are as safe as the underlying allocator implementation is. - - #[inline] - unsafe fn alloc(&self, layout: Layout) -> *mut u8 { - let old_alloc = ALLOCATOR_DATA.allocated.fetch_add(layout.size(), SeqCst); - ALLOCATOR_DATA.peak.fetch_max(old_alloc + layout.size(), SeqCst); - self.0.alloc(layout) - } - - #[inline] - unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { - let old_alloc = ALLOCATOR_DATA.allocated.fetch_add(layout.size(), SeqCst); - ALLOCATOR_DATA.peak.fetch_max(old_alloc + layout.size(), SeqCst); - self.0.alloc_zeroed(layout) - } - - #[inline] - unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) -> () { - ALLOCATOR_DATA.allocated.fetch_sub(layout.size(), SeqCst); - self.0.dealloc(ptr, layout) - } - - #[inline] - unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { - if new_size > layout.size() { - let old_alloc = ALLOCATOR_DATA.allocated.fetch_add(new_size - layout.size(), SeqCst); - ALLOCATOR_DATA.peak.fetch_max(old_alloc + new_size - layout.size(), SeqCst); - } else { - ALLOCATOR_DATA.allocated.fetch_sub(layout.size() - new_size, SeqCst); - } - self.0.realloc(ptr, 
layout, new_size) - } + #[inline] + unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { + ALLOCATOR_DATA.track((new_size as isize) - (layout.size() as isize)); + self.0.realloc(ptr, layout, new_size) + } } #[global_allocator] -static ALLOC: WrapperAllocator = WrapperAllocator(Jemalloc); +pub static ALLOC: WrapperAllocator = WrapperAllocator(Jemalloc); diff --git a/src/main.rs b/src/main.rs index ba4355b2d027..9f614507f6fd 100644 --- a/src/main.rs +++ b/src/main.rs @@ -22,7 +22,10 @@ use color_eyre::eyre; /// Global allocator. Changing it to another allocator will require changing /// `memory_stats::MemoryAllocationTracker`. -#[cfg(all(any(target_os = "linux", feature = "jemalloc-allocator"), not(feature = "wrapper-allocator")))] +#[cfg(all( + any(target_os = "linux", feature = "jemalloc-allocator"), + not(feature = "wrapper-allocator") +))] #[global_allocator] pub static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; From c8e9d1c27782589afd8d442bef988c723b5abc08 Mon Sep 17 00:00:00 2001 From: Dmitry Sinyavin Date: Fri, 19 May 2023 15:53:13 +0200 Subject: [PATCH 04/35] Try spinlock approach --- node/core/pvf/worker/src/prepare.rs | 7 +- node/wrapper-allocator/src/lib.rs | 131 ++++++++++++---------------- 2 files changed, 61 insertions(+), 77 deletions(-) diff --git a/node/core/pvf/worker/src/prepare.rs b/node/core/pvf/worker/src/prepare.rs index 82836def37df..f5dc3ec01984 100644 --- a/node/core/pvf/worker/src/prepare.rs +++ b/node/core/pvf/worker/src/prepare.rs @@ -112,19 +112,18 @@ pub fn worker_entrypoint(socket_path: &str, node_version: Option<&str>) { let prepare_fut = rt_handle .spawn_blocking(move || { #[cfg(feature = "wrapper-allocator")] - ALLOC.start_tracking(100_000_000); + ALLOC.start_tracking(); let result = prepare_artifact(pvf); #[cfg(feature = "wrapper-allocator")] { - let (events, peak) = ALLOC.end_tracking(); + let peak = ALLOC.end_tracking(); gum::debug!( target: LOG_TARGET, %worker_pid, - 
"prepare job peak allocation is {} bytes in {} events", + "prepare job peak allocation is {} bytes", peak, - events, ); } diff --git a/node/wrapper-allocator/src/lib.rs b/node/wrapper-allocator/src/lib.rs index e9da9a975c48..7e0ba5d0647c 100644 --- a/node/wrapper-allocator/src/lib.rs +++ b/node/wrapper-allocator/src/lib.rs @@ -14,104 +14,91 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Tracking global allocator. Initially just forwards allocation and deallocation requests -//! to the underlying allocator. When tracking is enabled, stores every allocation event into -//! pre-allocated backlog. When tracking mode is disables, replays the backlog and counts the -//! number of allocation events and the peak allocation value. +//! Tracking global allocator. Calculates the peak allocation between two checkpoints. use core::alloc::{GlobalAlloc, Layout}; -use std::sync::{ - atomic::{AtomicUsize, Ordering::Relaxed}, - RwLock, -}; +use std::sync::atomic::{AtomicBool, Ordering}; use tikv_jemallocator::Jemalloc; struct WrapperAllocatorData { - tracking: RwLock, - backlog: Vec, - backlog_index: AtomicUsize, + lock: AtomicBool, + current: isize, + peak: isize, } impl WrapperAllocatorData { - // SAFETY: - // * Tracking must only be performed by a single thread at a time - // * `start_tracking` and `stop_tracking` must be called from the same thread - // * Tracking periods must not overlap - // * Caller must provide sufficient backlog size - - unsafe fn start_tracking(&mut self, backlog_size: usize) { - // Allocate the backlog before locking anything. The allocation won't be available later. - let backlog = Vec::with_capacity(backlog_size); - // Lock allocations, move the allocated vector to our place and start tracking. 
- let mut tracking = self.tracking.write().unwrap(); - assert!(!*tracking); // Shouldn't start tracking if already tracking - self.backlog = backlog; - self.backlog.resize(backlog_size, 0); - self.backlog_index.store(0, Relaxed); - *tracking = true; - } - - unsafe fn end_tracking(&mut self) -> (usize, isize) { - let mut tracking = self.tracking.write().unwrap(); - assert!(*tracking); // Start/end calls must be consistent - - // At this point, all the allocation is blocked as all the threads are waiting for - // read lock on `tracking`. The following code replays the backlog and calulates the - // peak value. It must not perform any allocation, otherwise a deadlock will occur. - let mut peak = 0; - let mut alloc = 0; - let mut events = 0usize; - for i in 0..self.backlog.len() { - if self.backlog[i] == 0 { + #[inline] + fn lock(&self) { + loop { + // Try to acquire the lock. + if self + .lock + .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed) + .is_ok() + { break } - events += 1; - alloc += self.backlog[i]; - if alloc > peak { - peak = alloc + // We failed to acquire the lock; wait until it's unlocked. + // + // In theory this should result in less coherency traffic as unlike `compare_exchange` + // it is a read-only operation, so multiple cores can execute it simultaneously + // without taking an exclusive lock over the cache line. 
+ while self.lock.load(Ordering::Relaxed) { + std::hint::spin_loop(); } } - *tracking = false; - (events, peak) } #[inline] - unsafe fn track(&mut self, alloc: isize) { - let tracking = self.tracking.read().unwrap(); - if !*tracking { - return - } - let i = self.backlog_index.fetch_add(1, Relaxed); - if i == self.backlog.len() { - // We cannot use formatted text here as it would result in allocations and a deadlock - panic!("Backlog size provided was not enough for allocation tracking"); + fn unlock(&self) { + self.lock.store(false, Ordering::Release); + } + + fn start_tracking(&mut self) { + self.lock(); + self.current = 0; + self.peak = 0; + self.unlock(); + } + + fn end_tracking(&self) -> isize { + self.lock(); + let peak = self.peak; + self.unlock(); + peak + } + + #[inline] + fn track(&mut self, alloc: isize) { + self.lock(); + self.current += alloc; + if self.current > self.peak { + self.peak = self.current; } - // It is safe as the vector is pre-allocated and the index is acquired atomically - self.backlog[i] = alloc; + self.unlock(); } } -static mut ALLOCATOR_DATA: WrapperAllocatorData = WrapperAllocatorData { - tracking: RwLock::new(false), - backlog: vec![], - backlog_index: AtomicUsize::new(0), -}; +static mut ALLOCATOR_DATA: WrapperAllocatorData = + WrapperAllocatorData { lock: AtomicBool::new(false), current: 0, peak: 0 }; pub struct WrapperAllocator(A); impl WrapperAllocator { - /// Start tracking with the given backlog size (in allocation events). Providing insufficient - /// backlog size will result in a panic. - pub fn start_tracking(&self, backlog_size: usize) { + // SAFETY: + // * The following functions write to `static mut`. That is safe as the critical section + // inside is isolated by an exclusive lock. 
+ + /// Start tracking + pub fn start_tracking(&self) { unsafe { - ALLOCATOR_DATA.start_tracking(backlog_size); + ALLOCATOR_DATA.start_tracking(); } } - /// End tracking and return number of allocation events (as `usize`) and peak allocation - /// value in bytes (as `isize`). Peak allocation value is not guaranteed to be neither - /// non-zero nor positive. - pub fn end_tracking(&self) -> (usize, isize) { + /// End tracking and return the peak allocation value in bytes (as `isize`). Peak allocation + /// value is not guaranteed to be neither non-zero nor positive. + pub fn end_tracking(&self) -> isize { unsafe { ALLOCATOR_DATA.end_tracking() } } } @@ -119,8 +106,6 @@ impl WrapperAllocator { unsafe impl GlobalAlloc for WrapperAllocator { // SAFETY: // * The wrapped methods are as safe as the underlying allocator implementation is - // * In tracking mode, it is safe as long as a sufficient backlog size is provided when - // entering the tracking mode #[inline] unsafe fn alloc(&self, layout: Layout) -> *mut u8 { From b841129e0552cfa981ccb3ef2db660d8d70ee0b3 Mon Sep 17 00:00:00 2001 From: Dmitry Sinyavin Date: Fri, 19 May 2023 16:01:49 +0200 Subject: [PATCH 05/35] Rename things --- Cargo.lock | 18 +++++++++--------- Cargo.toml | 6 +++--- node/core/pvf/worker/Cargo.toml | 4 ++-- .../Cargo.toml | 4 ++-- .../src/lib.rs | 16 ++++++++-------- 5 files changed, 24 insertions(+), 24 deletions(-) rename node/{wrapper-allocator => tracking-allocator}/Cargo.toml (50%) rename node/{wrapper-allocator => tracking-allocator}/src/lib.rs (88%) diff --git a/Cargo.lock b/Cargo.lock index 0c929ebb59b0..67f45e1093e2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6723,7 +6723,7 @@ dependencies = [ "tempfile", "tikv-jemallocator", "tokio", - "wrapper-allocator", + "tracking-allocator", ] [[package]] @@ -7388,7 +7388,7 @@ dependencies = [ "tikv-jemalloc-ctl", "tokio", "tracing-gum", - "wrapper-allocator", + "tracking-allocator", ] [[package]] @@ -12795,6 +12795,13 @@ dependencies = [ 
"tracing-log", ] +[[package]] +name = "tracking-allocator" +version = "0.9.41" +dependencies = [ + "tikv-jemallocator", +] + [[package]] name = "trie-db" version = "0.27.0" @@ -14222,13 +14229,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "wrapper-allocator" -version = "0.9.41" -dependencies = [ - "tikv-jemallocator", -] - [[package]] name = "wyz" version = "0.5.1" diff --git a/Cargo.toml b/Cargo.toml index 0a51bbf39170..671f8e1783f4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,7 +26,7 @@ tikv-jemallocator = "0.5.0" polkadot-cli = { path = "cli", features = [ "kusama-native", "westend-native", "rococo-native" ] } polkadot-node-core-pvf-worker = { path = "node/core/pvf/worker" } polkadot-overseer = { path = "node/overseer" } -wrapper-allocator = { path = "node/wrapper-allocator", optional = true } +tracking-allocator = { path = "node/tracking-allocator", optional = true } [dev-dependencies] assert_cmd = "2.0.4" @@ -102,7 +102,7 @@ members = [ "node/subsystem-types", "node/subsystem-test-helpers", "node/subsystem-util", - "node/wrapper-allocator", + "node/tracking-allocator", "node/jaeger", "node/gum", "node/gum/proc-macro", @@ -210,7 +210,7 @@ fast-runtime = [ "polkadot-cli/fast-runtime" ] runtime-metrics = [ "polkadot-cli/runtime-metrics" ] pyroscope = ["polkadot-cli/pyroscope"] jemalloc-allocator = ["polkadot-node-core-pvf-worker/jemalloc-allocator", "polkadot-overseer/jemalloc-allocator"] -wrapper-allocator = ["jemalloc-allocator", "dep:wrapper-allocator", "polkadot-node-core-pvf-worker/wrapper-allocator"] +tracking-allocator = ["jemalloc-allocator", "dep:tracking-allocator", "polkadot-node-core-pvf-worker/tracking-allocator"] # Configuration for building a .deb package - for use with `cargo-deb` [package.metadata.deb] diff --git a/node/core/pvf/worker/Cargo.toml b/node/core/pvf/worker/Cargo.toml index 3ff9815c412a..775660c2ce25 100644 --- a/node/core/pvf/worker/Cargo.toml +++ b/node/core/pvf/worker/Cargo.toml @@ -18,7 +18,7 @@ rayon = "1.5.1" 
tempfile = "3.3.0" tikv-jemalloc-ctl = { version = "0.5.0", optional = true } tokio = "1.24.2" -wrapper-allocator = { path = "../../../wrapper-allocator", optional = true } +tracking-allocator = { path = "../../../tracking-allocator", optional = true } parity-scale-codec = { version = "3.4.0", default-features = false, features = ["derive"] } @@ -48,4 +48,4 @@ tempfile = "3.3.0" [features] jemalloc-allocator = ["dep:tikv-jemalloc-ctl"] -wrapper-allocator = ["dep:wrapper-allocator"] +tracking-allocator = ["dep:tracking-allocator"] diff --git a/node/wrapper-allocator/Cargo.toml b/node/tracking-allocator/Cargo.toml similarity index 50% rename from node/wrapper-allocator/Cargo.toml rename to node/tracking-allocator/Cargo.toml index fca867934e3d..81f95b923398 100644 --- a/node/wrapper-allocator/Cargo.toml +++ b/node/tracking-allocator/Cargo.toml @@ -1,6 +1,6 @@ [package] -name = "wrapper-allocator" -description = "Wrapper allocator to control amount of memory consumed by PVF preparation process" +name = "tracking-allocator" +description = "Tracking allocator to control amount of memory consumed by PVF preparation process" version.workspace = true authors.workspace = true edition.workspace = true diff --git a/node/wrapper-allocator/src/lib.rs b/node/tracking-allocator/src/lib.rs similarity index 88% rename from node/wrapper-allocator/src/lib.rs rename to node/tracking-allocator/src/lib.rs index 7e0ba5d0647c..5b51f26184ab 100644 --- a/node/wrapper-allocator/src/lib.rs +++ b/node/tracking-allocator/src/lib.rs @@ -20,13 +20,13 @@ use core::alloc::{GlobalAlloc, Layout}; use std::sync::atomic::{AtomicBool, Ordering}; use tikv_jemallocator::Jemalloc; -struct WrapperAllocatorData { +struct TrackingAllocatorData { lock: AtomicBool, current: isize, peak: isize, } -impl WrapperAllocatorData { +impl TrackingAllocatorData { #[inline] fn lock(&self) { loop { @@ -79,12 +79,12 @@ impl WrapperAllocatorData { } } -static mut ALLOCATOR_DATA: WrapperAllocatorData = - WrapperAllocatorData { 
lock: AtomicBool::new(false), current: 0, peak: 0 }; +static mut ALLOCATOR_DATA: TrackingAllocatorData = + TrackingAllocatorData { lock: AtomicBool::new(false), current: 0, peak: 0 }; -pub struct WrapperAllocator(A); +pub struct TrackingAllocator(A); -impl WrapperAllocator { +impl TrackingAllocator { // SAFETY: // * The following functions write to `static mut`. That is safe as the critical section // inside is isolated by an exclusive lock. @@ -103,7 +103,7 @@ impl WrapperAllocator { } } -unsafe impl GlobalAlloc for WrapperAllocator { +unsafe impl GlobalAlloc for TrackingAllocator { // SAFETY: // * The wrapped methods are as safe as the underlying allocator implementation is @@ -133,4 +133,4 @@ unsafe impl GlobalAlloc for WrapperAllocator { } #[global_allocator] -pub static ALLOC: WrapperAllocator = WrapperAllocator(Jemalloc); +pub static ALLOC: TrackingAllocator = TrackingAllocator(Jemalloc); From 089e6d80ba534a80a81e323b15b863d3d53089ba Mon Sep 17 00:00:00 2001 From: Dmitry Sinyavin Date: Mon, 22 May 2023 15:48:09 +0200 Subject: [PATCH 06/35] Fix feature name --- node/core/pvf/worker/src/prepare.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/node/core/pvf/worker/src/prepare.rs b/node/core/pvf/worker/src/prepare.rs index c2a9f66b0209..62f323d423c6 100644 --- a/node/core/pvf/worker/src/prepare.rs +++ b/node/core/pvf/worker/src/prepare.rs @@ -38,8 +38,8 @@ use std::{ time::Duration, }; use tokio::{io, net::UnixStream}; -#[cfg(feature = "wrapper-allocator")] -use wrapper_allocator::ALLOC; +#[cfg(feature = "tracking-allocator")] +use tracking_allocator::ALLOC; async fn recv_request(stream: &mut UnixStream) -> io::Result<(PvfPrepData, PathBuf)> { let pvf = framed_recv(stream).await?; @@ -129,12 +129,12 @@ pub fn worker_entrypoint(socket_path: &str, node_version: Option<&str>) { let prepare_thread = thread::spawn_worker_thread( "prepare thread", move || { - #[cfg(feature = "wrapper-allocator")] + #[cfg(feature = "tracking-allocator")] 
ALLOC.start_tracking(); let result = prepare_artifact(pvf, cpu_time_start); - #[cfg(feature = "wrapper-allocator")] + #[cfg(feature = "tracking-allocator")] { let peak = ALLOC.end_tracking(); gum::debug!( From 818699af9a222f6c82866325d6be339e67b54915 Mon Sep 17 00:00:00 2001 From: Dmitry Sinyavin Date: Mon, 22 May 2023 15:49:36 +0200 Subject: [PATCH 07/35] Add a benchmark to measure Kusama runtime preparation time --- Cargo.lock | 2 + node/core/pvf/worker/Cargo.toml | 6 ++ .../worker/benches/prepare_kusama_runtime.rs | 59 +++++++++++++++++++ 3 files changed, 67 insertions(+) create mode 100644 node/core/pvf/worker/benches/prepare_kusama_runtime.rs diff --git a/Cargo.lock b/Cargo.lock index f19efe07e454..22055f4315ab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7500,7 +7500,9 @@ version = "0.9.41" dependencies = [ "assert_matches", "cpu-time", + "criterion", "futures", + "kusama-runtime", "libc", "parity-scale-codec", "polkadot-node-core-pvf", diff --git a/node/core/pvf/worker/Cargo.toml b/node/core/pvf/worker/Cargo.toml index 775660c2ce25..b0040c4abb7b 100644 --- a/node/core/pvf/worker/Cargo.toml +++ b/node/core/pvf/worker/Cargo.toml @@ -44,8 +44,14 @@ substrate-build-script-utils = { git = "https://github.com/paritytech/substrate" [dev-dependencies] adder = { package = "test-parachain-adder", path = "../../../../parachain/test-parachains/adder" } halt = { package = "test-parachain-halt", path = "../../../../parachain/test-parachains/halt" } +criterion = { version = "0.4.0", default-features = false, features = ["cargo_bench_support"] } +kusama-runtime = { path = "../../../../runtime/kusama" } tempfile = "3.3.0" +[[bench]] +name = "prepare_kusama_runtime" +harness = false + [features] jemalloc-allocator = ["dep:tikv-jemalloc-ctl"] tracking-allocator = ["dep:tracking-allocator"] diff --git a/node/core/pvf/worker/benches/prepare_kusama_runtime.rs b/node/core/pvf/worker/benches/prepare_kusama_runtime.rs new file mode 100644 index 000000000000..d4c8e827042c --- 
/dev/null +++ b/node/core/pvf/worker/benches/prepare_kusama_runtime.rs @@ -0,0 +1,59 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use criterion::{criterion_group, criterion_main, Criterion, SamplingMode}; +use polkadot_node_core_pvf::PvfPrepData; +use polkadot_node_core_pvf_worker::{prepare, prevalidate}; +use polkadot_primitives::ExecutorParams; +use std::time::Duration; + +fn do_prepare_kusama_runtime(pvf: PvfPrepData) { + let blob = match prevalidate(&pvf.code()) { + Err(err) => panic!("{:?}", err), + Ok(b) => b, + }; + + match prepare(blob, &pvf.executor_params()) { + Ok(_) => (), + Err(err) => panic!("{:?}", err), + } +} + +fn prepare_kusama_runtime(c: &mut Criterion) { + let blob = kusama_runtime::WASM_BINARY.unwrap(); + let pvf = match sp_maybe_compressed_blob::decompress(&blob, 64 * 1024 * 1024) { + Ok(code) => PvfPrepData::from_code( + code.into_owned(), + ExecutorParams::default(), + Duration::from_secs(360), + ), + Err(e) => { + panic!("Cannot decompress blob: {:?}", e); + }, + }; + + let mut group = c.benchmark_group("kusama"); + group.sampling_mode(SamplingMode::Flat); + group.sample_size(20); + group.measurement_time(Duration::from_secs(240)); + group.bench_function("prepare Kusama runtime", |b| { + b.iter(|| do_prepare_kusama_runtime(pvf.clone())) + }); + group.finish(); +} + +criterion_group!(preparation, 
prepare_kusama_runtime); +criterion_main!(preparation); From dbdeb52d504396d7aec5ed7df4d77bb7694a7753 Mon Sep 17 00:00:00 2001 From: command-bot <> Date: Tue, 1 Aug 2023 10:28:31 +0000 Subject: [PATCH 08/35] ".git/.scripts/commands/fmt/fmt.sh" --- .../dispute-coordinator/src/initialized.rs | 101 +++++++++--------- 1 file changed, 50 insertions(+), 51 deletions(-) diff --git a/node/core/dispute-coordinator/src/initialized.rs b/node/core/dispute-coordinator/src/initialized.rs index 9ba223d2eff3..2a1d8fd4b83c 100644 --- a/node/core/dispute-coordinator/src/initialized.rs +++ b/node/core/dispute-coordinator/src/initialized.rs @@ -217,62 +217,61 @@ impl Initialized { gum::trace!(target: LOG_TARGET, "Waiting for message"); let mut overlay_db = OverlayedBackend::new(backend); let default_confirm = Box::new(|| Ok(())); - let confirm_write = match MuxedMessage::receive(ctx, &mut self.participation_receiver) - .await? - { - MuxedMessage::Participation(msg) => { - gum::trace!(target: LOG_TARGET, "MuxedMessage::Participation"); - let ParticipationStatement { - session, - candidate_hash, - candidate_receipt, - outcome, - } = self.participation.get_participation_result(ctx, msg).await?; - if let Some(valid) = outcome.validity() { - gum::trace!( - target: LOG_TARGET, - ?session, - ?candidate_hash, - ?valid, - "Issuing local statement based on participation outcome." - ); - self.issue_local_statement( - ctx, - &mut overlay_db, + let confirm_write = + match MuxedMessage::receive(ctx, &mut self.participation_receiver).await? 
{ + MuxedMessage::Participation(msg) => { + gum::trace!(target: LOG_TARGET, "MuxedMessage::Participation"); + let ParticipationStatement { + session, candidate_hash, candidate_receipt, - session, - valid, - clock.now(), - ) - .await?; - } else { - gum::warn!(target: LOG_TARGET, ?outcome, "Dispute participation failed"); - } - default_confirm - }, - MuxedMessage::Subsystem(msg) => match msg { - FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()), - FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => { - gum::trace!(target: LOG_TARGET, "OverseerSignal::ActiveLeaves"); - self.process_active_leaves_update( - ctx, - &mut overlay_db, - update, - clock.now(), - ) - .await?; + outcome, + } = self.participation.get_participation_result(ctx, msg).await?; + if let Some(valid) = outcome.validity() { + gum::trace!( + target: LOG_TARGET, + ?session, + ?candidate_hash, + ?valid, + "Issuing local statement based on participation outcome." + ); + self.issue_local_statement( + ctx, + &mut overlay_db, + candidate_hash, + candidate_receipt, + session, + valid, + clock.now(), + ) + .await?; + } else { + gum::warn!(target: LOG_TARGET, ?outcome, "Dispute participation failed"); + } default_confirm }, - FromOrchestra::Signal(OverseerSignal::BlockFinalized(_, n)) => { - gum::trace!(target: LOG_TARGET, "OverseerSignal::BlockFinalized"); - self.scraper.process_finalized_block(&n); - default_confirm + MuxedMessage::Subsystem(msg) => match msg { + FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()), + FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => { + gum::trace!(target: LOG_TARGET, "OverseerSignal::ActiveLeaves"); + self.process_active_leaves_update( + ctx, + &mut overlay_db, + update, + clock.now(), + ) + .await?; + default_confirm + }, + FromOrchestra::Signal(OverseerSignal::BlockFinalized(_, n)) => { + gum::trace!(target: LOG_TARGET, "OverseerSignal::BlockFinalized"); + self.scraper.process_finalized_block(&n); + default_confirm + }, + 
FromOrchestra::Communication { msg } => + self.handle_incoming(ctx, &mut overlay_db, msg, clock.now()).await?, }, - FromOrchestra::Communication { msg } => - self.handle_incoming(ctx, &mut overlay_db, msg, clock.now()).await?, - }, - }; + }; if !overlay_db.is_empty() { let ops = overlay_db.into_write_ops(); From 1dfd9cabab14b3805f30b98d3165f4d12ad78629 Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Tue, 1 Aug 2023 13:04:52 +0200 Subject: [PATCH 09/35] ` XcmContext` to `buy_weight / refund_weight` (#7563) * added XcmContext to WeightTrader trait * cargo fmt * make xcm context optional * make compile * fix compile * `XcmContext` to `buy_weight / refund_weight` --------- Co-authored-by: Patricio Napoli Co-authored-by: Giles Cope Co-authored-by: parity-processbot <> --- runtime/test-runtime/src/xcm_config.rs | 7 +++- xcm/xcm-builder/src/tests/weight.rs | 45 +++++++++++++++++++------- xcm/xcm-builder/src/weight.rs | 28 ++++++++++------ xcm/xcm-executor/src/lib.rs | 4 +-- xcm/xcm-executor/src/traits/weight.rs | 22 +++++++++---- 5 files changed, 77 insertions(+), 29 deletions(-) diff --git a/runtime/test-runtime/src/xcm_config.rs b/runtime/test-runtime/src/xcm_config.rs index 45e7956d45ba..21ce8c877dc3 100644 --- a/runtime/test-runtime/src/xcm_config.rs +++ b/runtime/test-runtime/src/xcm_config.rs @@ -80,7 +80,12 @@ impl WeightTrader for DummyWeightTrader { DummyWeightTrader } - fn buy_weight(&mut self, _weight: Weight, _payment: Assets) -> Result { + fn buy_weight( + &mut self, + _weight: Weight, + _payment: Assets, + _context: &XcmContext, + ) -> Result { Ok(Assets::default()) } } diff --git a/xcm/xcm-builder/src/tests/weight.rs b/xcm/xcm-builder/src/tests/weight.rs index 99ef029196ff..a2fb265413f5 100644 --- a/xcm/xcm-builder/src/tests/weight.rs +++ b/xcm/xcm-builder/src/tests/weight.rs @@ -24,25 +24,42 @@ fn fixed_rate_of_fungible_should_work() { } let mut trader = FixedRateOfFungible::::new(); + let ctx = XcmContext { origin: None, message_id: 
XcmHash::default(), topic: None }; + // supplies 100 unit of asset, 80 still remains after purchasing weight assert_eq!( - trader - .buy_weight(Weight::from_parts(10, 10), fungible_multi_asset(Here.into(), 100).into()), + trader.buy_weight( + Weight::from_parts(10, 10), + fungible_multi_asset(Here.into(), 100).into(), + &ctx, + ), Ok(fungible_multi_asset(Here.into(), 80).into()), ); // should have nothing left, as 5 + 5 = 10, and we supplied 10 units of asset. assert_eq!( - trader.buy_weight(Weight::from_parts(5, 5), fungible_multi_asset(Here.into(), 10).into()), + trader.buy_weight( + Weight::from_parts(5, 5), + fungible_multi_asset(Here.into(), 10).into(), + &ctx, + ), Ok(vec![].into()), ); // should have 5 left, as there are no proof size components assert_eq!( - trader.buy_weight(Weight::from_parts(5, 0), fungible_multi_asset(Here.into(), 10).into()), + trader.buy_weight( + Weight::from_parts(5, 0), + fungible_multi_asset(Here.into(), 10).into(), + &ctx, + ), Ok(fungible_multi_asset(Here.into(), 5).into()), ); // not enough to purchase the combined weights assert_err!( - trader.buy_weight(Weight::from_parts(5, 5), fungible_multi_asset(Here.into(), 5).into()), + trader.buy_weight( + Weight::from_parts(5, 5), + fungible_multi_asset(Here.into(), 5).into(), + &ctx, + ), XcmError::TooExpensive, ); } @@ -149,35 +166,41 @@ fn weight_trader_tuple_should_work() { ); let mut traders = Traders::new(); + let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; + // trader one buys weight assert_eq!( - traders.buy_weight(Weight::from_parts(5, 5), fungible_multi_asset(Here.into(), 10).into()), + traders.buy_weight( + Weight::from_parts(5, 5), + fungible_multi_asset(Here.into(), 10).into(), + &ctx + ), Ok(vec![].into()), ); // trader one refunds assert_eq!( - traders.refund_weight(Weight::from_parts(2, 2)), + traders.refund_weight(Weight::from_parts(2, 2), &ctx), Some(fungible_multi_asset(Here.into(), 4)) ); let mut traders = Traders::new(); // 
trader one failed; trader two buys weight assert_eq!( - traders.buy_weight(Weight::from_parts(5, 5), fungible_multi_asset(para_1, 10).into()), + traders.buy_weight(Weight::from_parts(5, 5), fungible_multi_asset(para_1, 10).into(), &ctx), Ok(vec![].into()), ); // trader two refunds assert_eq!( - traders.refund_weight(Weight::from_parts(2, 2)), + traders.refund_weight(Weight::from_parts(2, 2), &ctx), Some(fungible_multi_asset(para_1, 4)) ); let mut traders = Traders::new(); // all traders fails assert_err!( - traders.buy_weight(Weight::from_parts(5, 5), fungible_multi_asset(para_2, 10).into()), + traders.buy_weight(Weight::from_parts(5, 5), fungible_multi_asset(para_2, 10).into(), &ctx), XcmError::TooExpensive, ); // and no refund - assert_eq!(traders.refund_weight(Weight::from_parts(2, 2)), None); + assert_eq!(traders.refund_weight(Weight::from_parts(2, 2), &ctx), None); } diff --git a/xcm/xcm-builder/src/weight.rs b/xcm/xcm-builder/src/weight.rs index 1473775eccd8..73cba6cb557b 100644 --- a/xcm/xcm-builder/src/weight.rs +++ b/xcm/xcm-builder/src/weight.rs @@ -140,11 +140,16 @@ impl, R: TakeRevenue> WeightTrader for FixedRateOf Self(Weight::zero(), 0, PhantomData) } - fn buy_weight(&mut self, weight: Weight, payment: Assets) -> Result { + fn buy_weight( + &mut self, + weight: Weight, + payment: Assets, + context: &XcmContext, + ) -> Result { log::trace!( target: "xcm::weight", - "FixedRateOfFungible::buy_weight weight: {:?}, payment: {:?}", - weight, payment, + "FixedRateOfFungible::buy_weight weight: {:?}, payment: {:?}, context: {:?}", + weight, payment, context, ); let (id, units_per_second, units_per_mb) = T::get(); let amount = (units_per_second * (weight.ref_time() as u128) / @@ -160,8 +165,8 @@ impl, R: TakeRevenue> WeightTrader for FixedRateOf Ok(unused) } - fn refund_weight(&mut self, weight: Weight) -> Option { - log::trace!(target: "xcm::weight", "FixedRateOfFungible::refund_weight weight: {:?}", weight); + fn refund_weight(&mut self, weight: Weight, 
context: &XcmContext) -> Option { + log::trace!(target: "xcm::weight", "FixedRateOfFungible::refund_weight weight: {:?}, context: {:?}", weight, context); let (id, units_per_second, units_per_mb) = T::get(); let weight = weight.min(self.0); let amount = (units_per_second * (weight.ref_time() as u128) / @@ -210,8 +215,13 @@ impl< Self(Weight::zero(), Zero::zero(), PhantomData) } - fn buy_weight(&mut self, weight: Weight, payment: Assets) -> Result { - log::trace!(target: "xcm::weight", "UsingComponents::buy_weight weight: {:?}, payment: {:?}", weight, payment); + fn buy_weight( + &mut self, + weight: Weight, + payment: Assets, + context: &XcmContext, + ) -> Result { + log::trace!(target: "xcm::weight", "UsingComponents::buy_weight weight: {:?}, payment: {:?}, context: {:?}", weight, payment, context); let amount = WeightToFee::weight_to_fee(&weight); let u128_amount: u128 = amount.try_into().map_err(|_| XcmError::Overflow)?; let required = (Concrete(AssetId::get()), u128_amount).into(); @@ -221,8 +231,8 @@ impl< Ok(unused) } - fn refund_weight(&mut self, weight: Weight) -> Option { - log::trace!(target: "xcm::weight", "UsingComponents::refund_weight weight: {:?}", weight); + fn refund_weight(&mut self, weight: Weight, context: &XcmContext) -> Option { + log::trace!(target: "xcm::weight", "UsingComponents::refund_weight weight: {:?}, context: {:?}", weight, context); let weight = weight.min(self.0); let amount = WeightToFee::weight_to_fee(&weight); self.0 -= weight; diff --git a/xcm/xcm-executor/src/lib.rs b/xcm/xcm-executor/src/lib.rs index 050d73837085..57ddc4322923 100644 --- a/xcm/xcm-executor/src/lib.rs +++ b/xcm/xcm-executor/src/lib.rs @@ -457,7 +457,7 @@ impl XcmExecutor { let current_surplus = self.total_surplus.saturating_sub(self.total_refunded); if current_surplus.any_gt(Weight::zero()) { self.total_refunded.saturating_accrue(current_surplus); - if let Some(w) = self.trader.refund_weight(current_surplus) { + if let Some(w) = 
self.trader.refund_weight(current_surplus, &self.context) { self.subsume_asset(w)?; } } @@ -689,7 +689,7 @@ impl XcmExecutor { // pay for `weight` using up to `fees` of the holding register. let max_fee = self.holding.try_take(fees.into()).map_err(|_| XcmError::NotHoldingFees)?; - let unspent = self.trader.buy_weight(weight, max_fee)?; + let unspent = self.trader.buy_weight(weight, max_fee, &self.context)?; self.subsume_assets(unspent)?; } Ok(()) diff --git a/xcm/xcm-executor/src/traits/weight.rs b/xcm/xcm-executor/src/traits/weight.rs index 033614d4bf61..06e6b5f55bce 100644 --- a/xcm/xcm-executor/src/traits/weight.rs +++ b/xcm/xcm-executor/src/traits/weight.rs @@ -49,13 +49,18 @@ pub trait WeightTrader: Sized { /// Purchase execution weight credit in return for up to a given `payment`. If less of the /// payment is required then the surplus is returned. If the `payment` cannot be used to pay /// for the `weight`, then an error is returned. - fn buy_weight(&mut self, weight: Weight, payment: Assets) -> Result; + fn buy_weight( + &mut self, + weight: Weight, + payment: Assets, + context: &XcmContext, + ) -> Result; /// Attempt a refund of `weight` into some asset. The caller does not guarantee that the weight was /// purchased using `buy_weight`. /// /// Default implementation refunds nothing. 
- fn refund_weight(&mut self, _weight: Weight) -> Option { + fn refund_weight(&mut self, _weight: Weight, _context: &XcmContext) -> Option { None } } @@ -66,11 +71,16 @@ impl WeightTrader for Tuple { for_tuples!( ( #( Tuple::new() ),* ) ) } - fn buy_weight(&mut self, weight: Weight, payment: Assets) -> Result { + fn buy_weight( + &mut self, + weight: Weight, + payment: Assets, + context: &XcmContext, + ) -> Result { let mut too_expensive_error_found = false; let mut last_error = None; for_tuples!( #( - match Tuple.buy_weight(weight, payment.clone()) { + match Tuple.buy_weight(weight, payment.clone(), context) { Ok(assets) => return Ok(assets), Err(e) => { if let XcmError::TooExpensive = e { @@ -92,9 +102,9 @@ impl WeightTrader for Tuple { }) } - fn refund_weight(&mut self, weight: Weight) -> Option { + fn refund_weight(&mut self, weight: Weight, context: &XcmContext) -> Option { for_tuples!( #( - if let Some(asset) = Tuple.refund_weight(weight) { + if let Some(asset) = Tuple.refund_weight(weight, context) { return Some(asset); } )* ); From 152888f2f484fc115758df035672b7ec1a9a5507 Mon Sep 17 00:00:00 2001 From: eskimor Date: Tue, 1 Aug 2023 14:43:54 +0200 Subject: [PATCH 10/35] Take into account size as well in weight limiting. (#7369) * Take into account size as well in weight limiting. * Fix logging. * More logs. * Remove randomized selection in provisioner No longer supported by runtime. * Fix and simplify weight calculation. Random filtering of remote disputes got dropped. * Make existing tests pass. * Tests for size limiting. * Fix provisioner. * Remove rand dependency. * Better default block length for tests. 
* ".git/.scripts/commands/bench/bench.sh" runtime kusama runtime_parachains::paras_inherent * ".git/.scripts/commands/bench/bench.sh" runtime polkadot runtime_parachains::paras_inherent * ".git/.scripts/commands/bench/bench.sh" runtime westend runtime_parachains::paras_inherent * Update runtime/parachains/src/paras_inherent/mod.rs Co-authored-by: Tsvetomir Dimitrov * Update runtime/parachains/src/paras_inherent/mod.rs Co-authored-by: Chris Sosnin <48099298+slumber@users.noreply.github.com> * Add back missing line. * Fix test. * fmt fix. * Add missing test annotation --------- Co-authored-by: eskimor Co-authored-by: command-bot <> Co-authored-by: Tsvetomir Dimitrov Co-authored-by: Chris Sosnin <48099298+slumber@users.noreply.github.com> --- Cargo.lock | 1 - node/core/provisioner/Cargo.toml | 1 - node/core/provisioner/src/disputes/mod.rs | 9 +- .../src/disputes/random_selection/mod.rs | 178 -------------- node/core/provisioner/src/lib.rs | 21 +- primitives/src/v5/mod.rs | 2 +- .../runtime_parachains_paras_inherent.rs | 41 ++-- runtime/parachains/src/mock.rs | 8 +- runtime/parachains/src/paras_inherent/mod.rs | 166 ++++++------- .../parachains/src/paras_inherent/tests.rs | 225 ++++++++++++++++-- .../parachains/src/paras_inherent/weights.rs | 106 ++++++--- .../runtime_parachains_paras_inherent.rs | 41 ++-- .../runtime_parachains_paras_inherent.rs | 41 ++-- 13 files changed, 438 insertions(+), 402 deletions(-) delete mode 100644 node/core/provisioner/src/disputes/random_selection/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 03e9da5df311..1037808b8e94 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6984,7 +6984,6 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", - "rand 0.8.5", "sp-application-crypto", "sp-keystore", "thiserror", diff --git a/node/core/provisioner/Cargo.toml b/node/core/provisioner/Cargo.toml index c6d78582cfc9..7c07118f1f3f 100644 --- a/node/core/provisioner/Cargo.toml +++ 
b/node/core/provisioner/Cargo.toml @@ -13,7 +13,6 @@ polkadot-primitives = { path = "../../../primitives" } polkadot-node-primitives = { path = "../../primitives" } polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } -rand = "0.8.5" futures-timer = "3.0.2" fatality = "0.0.6" diff --git a/node/core/provisioner/src/disputes/mod.rs b/node/core/provisioner/src/disputes/mod.rs index 4fcfa5b330cb..fab70a054698 100644 --- a/node/core/provisioner/src/disputes/mod.rs +++ b/node/core/provisioner/src/disputes/mod.rs @@ -14,12 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! The disputes module is responsible for selecting dispute votes to be sent with the inherent data. It contains two -//! different implementations, extracted in two separate modules - `random_selection` and `prioritized_selection`. Which -//! implementation will be executed depends on the version of the runtime. Runtime v2 supports `random_selection`. Runtime -//! `v3` and above - `prioritized_selection`. The entrypoint to these implementations is the `select_disputes` function. -//! `prioritized_selection` is considered superior and will be the default one in the future. Refer to the documentation of -//! the modules for more details about each implementation. +//! The disputes module is responsible for selecting dispute votes to be sent with the inherent data. use crate::LOG_TARGET; use futures::channel::oneshot; @@ -49,5 +44,3 @@ async fn request_votes( } pub(crate) mod prioritized_selection; - -pub(crate) mod random_selection; diff --git a/node/core/provisioner/src/disputes/random_selection/mod.rs b/node/core/provisioner/src/disputes/random_selection/mod.rs deleted file mode 100644 index 06d4ef34b665..000000000000 --- a/node/core/provisioner/src/disputes/random_selection/mod.rs +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. 
-// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! This module selects all RECENT disputes, fetches the votes for them from dispute-coordinator and -//! returns them as `MultiDisputeStatementSet`. If the RECENT disputes are more than -//! `MAX_DISPUTES_FORWARDED_TO_RUNTIME` constant - the ACTIVE disputes plus a random selection of -//! RECENT disputes (up to `MAX_DISPUTES_FORWARDED_TO_RUNTIME`) are returned instead. -//! If the ACTIVE disputes are also above `MAX_DISPUTES_FORWARDED_TO_RUNTIME` limit - a random selection -//! of them is generated. - -use crate::{metrics, LOG_TARGET}; -use futures::channel::oneshot; -use polkadot_node_subsystem::{messages::DisputeCoordinatorMessage, overseer}; -use polkadot_primitives::{ - CandidateHash, DisputeStatement, DisputeStatementSet, MultiDisputeStatementSet, SessionIndex, -}; -use std::collections::HashSet; - -/// The maximum number of disputes Provisioner will include in the inherent data. -/// Serves as a protection not to flood the Runtime with excessive data. -const MAX_DISPUTES_FORWARDED_TO_RUNTIME: usize = 1_000; - -#[derive(Debug)] -enum RequestType { - /// Query recent disputes, could be an excessive amount. - Recent, - /// Query the currently active and very recently concluded disputes. - Active, -} - -/// Request open disputes identified by `CandidateHash` and the `SessionIndex`. 
-/// Returns only confirmed/concluded disputes. The rest are filtered out. -async fn request_confirmed_disputes( - sender: &mut impl overseer::ProvisionerSenderTrait, - active_or_recent: RequestType, -) -> Vec<(SessionIndex, CandidateHash)> { - let (tx, rx) = oneshot::channel(); - let msg = match active_or_recent { - RequestType::Recent => DisputeCoordinatorMessage::RecentDisputes(tx), - RequestType::Active => DisputeCoordinatorMessage::ActiveDisputes(tx), - }; - - sender.send_unbounded_message(msg); - let disputes = match rx.await { - Ok(r) => r, - Err(oneshot::Canceled) => { - gum::warn!( - target: LOG_TARGET, - "Channel closed: unable to gather {:?} disputes", - active_or_recent - ); - Vec::new() - }, - }; - - disputes - .into_iter() - .filter(|d| d.2.is_confirmed_concluded()) - .map(|d| (d.0, d.1)) - .collect() -} - -/// Extend `acc` by `n` random, picks of not-yet-present in `acc` items of `recent` without repetition and additions of recent. -fn extend_by_random_subset_without_repetition( - acc: &mut Vec<(SessionIndex, CandidateHash)>, - extension: Vec<(SessionIndex, CandidateHash)>, - n: usize, -) { - use rand::Rng; - - let lut = acc.iter().cloned().collect::>(); - - let mut unique_new = - extension.into_iter().filter(|recent| !lut.contains(recent)).collect::>(); - - // we can simply add all - if unique_new.len() <= n { - acc.extend(unique_new) - } else { - acc.reserve(n); - let mut rng = rand::thread_rng(); - for _ in 0..n { - let idx = rng.gen_range(0..unique_new.len()); - acc.push(unique_new.swap_remove(idx)); - } - } - // assure sorting stays candid according to session index - acc.sort_unstable_by(|a, b| a.0.cmp(&b.0)); -} - -pub async fn select_disputes( - sender: &mut Sender, - metrics: &metrics::Metrics, -) -> MultiDisputeStatementSet -where - Sender: overseer::ProvisionerSenderTrait, -{ - gum::trace!(target: LOG_TARGET, "Selecting disputes for inherent data using random selection"); - - // We use `RecentDisputes` instead of `ActiveDisputes` because 
redundancy is fine. - // It's heavier than `ActiveDisputes` but ensures that everything from the dispute - // window gets on-chain, unlike `ActiveDisputes`. - // In case of an overload condition, we limit ourselves to active disputes, and fill up to the - // upper bound of disputes to pass to wasm `fn create_inherent_data`. - // If the active ones are already exceeding the bounds, randomly select a subset. - let recent = request_confirmed_disputes(sender, RequestType::Recent).await; - let disputes = if recent.len() > MAX_DISPUTES_FORWARDED_TO_RUNTIME { - gum::warn!( - target: LOG_TARGET, - "Recent disputes are excessive ({} > {}), reduce to active ones, and selected", - recent.len(), - MAX_DISPUTES_FORWARDED_TO_RUNTIME - ); - let mut active = request_confirmed_disputes(sender, RequestType::Active).await; - let n_active = active.len(); - let active = if active.len() > MAX_DISPUTES_FORWARDED_TO_RUNTIME { - let mut picked = Vec::with_capacity(MAX_DISPUTES_FORWARDED_TO_RUNTIME); - extend_by_random_subset_without_repetition( - &mut picked, - active, - MAX_DISPUTES_FORWARDED_TO_RUNTIME, - ); - picked - } else { - extend_by_random_subset_without_repetition( - &mut active, - recent, - MAX_DISPUTES_FORWARDED_TO_RUNTIME.saturating_sub(n_active), - ); - active - }; - active - } else { - recent - }; - - // Load all votes for all disputes from the coordinator. - let dispute_candidate_votes = super::request_votes(sender, disputes).await; - - // Transform all `CandidateVotes` into `MultiDisputeStatementSet`. 
- dispute_candidate_votes - .into_iter() - .map(|(session_index, candidate_hash, votes)| { - let valid_statements = votes - .valid - .into_iter() - .map(|(i, (s, sig))| (DisputeStatement::Valid(s), i, sig)); - - let invalid_statements = votes - .invalid - .into_iter() - .map(|(i, (s, sig))| (DisputeStatement::Invalid(s), i, sig)); - - metrics.inc_valid_statements_by(valid_statements.len()); - metrics.inc_invalid_statements_by(invalid_statements.len()); - metrics.inc_dispute_statement_sets_by(1); - - DisputeStatementSet { - candidate_hash, - session: session_index, - statements: valid_statements.chain(invalid_statements).collect(), - } - }) - .collect() -} diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs index 50254ce9bef9..3ae297fee736 100644 --- a/node/core/provisioner/src/lib.rs +++ b/node/core/provisioner/src/lib.rs @@ -393,16 +393,17 @@ async fn send_inherent_data( "Selecting disputes" ); - let disputes = match has_required_runtime( - from_job, - leaf.hash, - PRIORITIZED_SELECTION_RUNTIME_VERSION_REQUIREMENT, - ) - .await - { - true => disputes::prioritized_selection::select_disputes(from_job, metrics, leaf).await, - false => disputes::random_selection::select_disputes(from_job, metrics).await, - }; + debug_assert!( + has_required_runtime( + from_job, + leaf.hash, + PRIORITIZED_SELECTION_RUNTIME_VERSION_REQUIREMENT, + ) + .await, + "randomized selection no longer supported, please upgrade your runtime!" + ); + + let disputes = disputes::prioritized_selection::select_disputes(from_job, metrics, leaf).await; gum::trace!( target: LOG_TARGET, diff --git a/primitives/src/v5/mod.rs b/primitives/src/v5/mod.rs index 6c6258b2b805..3498c0762d4c 100644 --- a/primitives/src/v5/mod.rs +++ b/primitives/src/v5/mod.rs @@ -1372,7 +1372,7 @@ impl AsRef for DisputeStatementSet { pub type MultiDisputeStatementSet = Vec; /// A _checked_ set of dispute statements. 
-#[derive(Clone, PartialEq, RuntimeDebug)] +#[derive(Clone, PartialEq, RuntimeDebug, Encode)] pub struct CheckedDisputeStatementSet(DisputeStatementSet); impl AsRef for CheckedDisputeStatementSet { diff --git a/runtime/kusama/src/weights/runtime_parachains_paras_inherent.rs b/runtime/kusama/src/weights/runtime_parachains_paras_inherent.rs index 639164af522b..9a9a3a3dffb5 100644 --- a/runtime/kusama/src/weights/runtime_parachains_paras_inherent.rs +++ b/runtime/kusama/src/weights/runtime_parachains_paras_inherent.rs @@ -17,27 +17,26 @@ //! Autogenerated weights for `runtime_parachains::paras_inherent` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot +// target/production/polkadot // benchmark // pallet -// --chain=kusama-dev // --steps=50 // --repeat=20 -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --pallet=runtime_parachains::paras_inherent // --extrinsic=* // --execution=wasm // --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=runtime_parachains::paras_inherent +// --chain=kusama-dev // --header=./file_header.txt -// --output=./runtime/kusama/src/weights/runtime_parachains_paras_inherent.rs +// --output=./runtime/kusama/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -115,11 +114,11 @@ impl runtime_parachains::paras_inherent::WeightInfo for // Proof Size summary in bytes: // Measured: `50671` // Estimated: `56611 + v * (23 ±0)` - // Minimum execution time: 982_516_000 picoseconds. - Weight::from_parts(453_473_787, 0) + // Minimum execution time: 1_008_586_000 picoseconds. + Weight::from_parts(471_892_709, 0) .saturating_add(Weight::from_parts(0, 56611)) - // Standard Error: 21_034 - .saturating_add(Weight::from_parts(57_212_691, 0).saturating_mul(v.into())) + // Standard Error: 15_634 + .saturating_add(Weight::from_parts(56_433_120, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(27)) .saturating_add(T::DbWeight::get().writes(15)) .saturating_add(Weight::from_parts(0, 23).saturating_mul(v.into())) @@ -186,8 +185,8 @@ impl runtime_parachains::paras_inherent::WeightInfo for // Proof Size summary in bytes: // Measured: `42504` // Estimated: `48444` - // Minimum execution time: 465_674_000 picoseconds. - Weight::from_parts(483_678_000, 0) + // Minimum execution time: 469_409_000 picoseconds. 
+ Weight::from_parts(487_865_000, 0) .saturating_add(Weight::from_parts(0, 48444)) .saturating_add(T::DbWeight::get().reads(25)) .saturating_add(T::DbWeight::get().writes(16)) @@ -259,11 +258,11 @@ impl runtime_parachains::paras_inherent::WeightInfo for // Proof Size summary in bytes: // Measured: `42540` // Estimated: `48480` - // Minimum execution time: 6_886_272_000 picoseconds. - Weight::from_parts(1_235_371_688, 0) + // Minimum execution time: 6_874_816_000 picoseconds. + Weight::from_parts(1_229_912_739, 0) .saturating_add(Weight::from_parts(0, 48480)) - // Standard Error: 28_012 - .saturating_add(Weight::from_parts(56_395_511, 0).saturating_mul(v.into())) + // Standard Error: 27_352 + .saturating_add(Weight::from_parts(56_137_302, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(28)) .saturating_add(T::DbWeight::get().writes(15)) } @@ -337,8 +336,8 @@ impl runtime_parachains::paras_inherent::WeightInfo for // Proof Size summary in bytes: // Measured: `42567` // Estimated: `48507` - // Minimum execution time: 42_215_280_000 picoseconds. - Weight::from_parts(43_255_598_000, 0) + // Minimum execution time: 41_075_073_000 picoseconds. 
+ Weight::from_parts(43_753_587_000, 0) .saturating_add(Weight::from_parts(0, 48507)) .saturating_add(T::DbWeight::get().reads(30)) .saturating_add(T::DbWeight::get().writes(15)) diff --git a/runtime/parachains/src/mock.rs b/runtime/parachains/src/mock.rs index 396407d5c3d3..bab896c419f6 100644 --- a/runtime/parachains/src/mock.rs +++ b/runtime/parachains/src/mock.rs @@ -32,6 +32,7 @@ use frame_support::{ weights::{Weight, WeightMeter}, }; use frame_support_test::TestRandomness; +use frame_system::limits; use parity_scale_codec::Decode; use primitives::{ AuthorityDiscoveryId, Balance, BlockNumber, CandidateHash, Moment, SessionIndex, UpwardMessage, @@ -42,7 +43,7 @@ use sp_io::TestExternalities; use sp_runtime::{ traits::{AccountIdConversion, BlakeTwo256, IdentityLookup}, transaction_validity::TransactionPriority, - BuildStorage, Permill, + BuildStorage, Perbill, Permill, }; use std::{cell::RefCell, collections::HashMap}; @@ -81,10 +82,11 @@ where parameter_types! { pub const BlockHashCount: u32 = 250; - pub BlockWeights: frame_system::limits::BlockWeights = + pub static BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max( Weight::from_parts(4 * 1024 * 1024, u64::MAX), ); + pub static BlockLength: limits::BlockLength = limits::BlockLength::max_with_normal_ratio(u32::MAX, Perbill::from_percent(75)); } pub type AccountId = u64; @@ -92,7 +94,7 @@ pub type AccountId = u64; impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = BlockWeights; - type BlockLength = (); + type BlockLength = BlockLength; type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; diff --git a/runtime/parachains/src/paras_inherent/mod.rs b/runtime/parachains/src/paras_inherent/mod.rs index 0afab5e58d95..61be0d4adae8 100644 --- a/runtime/parachains/src/paras_inherent/mod.rs +++ b/runtime/parachains/src/paras_inherent/mod.rs @@ -53,7 +53,6 @@ use 
rand::{seq::SliceRandom, SeedableRng}; use scale_info::TypeInfo; use sp_runtime::traits::{Header as HeaderT, One}; use sp_std::{ - cmp::Ordering, collections::{btree_map::BTreeMap, btree_set::BTreeSet}, prelude::*, vec::Vec, @@ -62,12 +61,13 @@ use sp_std::{ mod misc; mod weights; +use self::weights::checked_multi_dispute_statement_sets_weight; pub use self::{ misc::{IndexedRetain, IsSortedBy}, weights::{ backed_candidate_weight, backed_candidates_weight, dispute_statement_set_weight, - multi_dispute_statement_sets_weight, paras_inherent_total_weight, signed_bitfields_weight, - TestWeightInfo, WeightInfo, + multi_dispute_statement_sets_weight, paras_inherent_total_weight, signed_bitfield_weight, + signed_bitfields_weight, TestWeightInfo, WeightInfo, }, }; @@ -264,8 +264,8 @@ pub mod pallet { #[pallet::weight(( paras_inherent_total_weight::( data.backed_candidates.as_slice(), - data.bitfields.as_slice(), - data.disputes.as_slice(), + &data.bitfields, + &data.disputes, ), DispatchClass::Mandatory, ))] @@ -356,17 +356,47 @@ impl Pallet { let now = >::block_number(); let candidates_weight = backed_candidates_weight::(&backed_candidates); - let bitfields_weight = signed_bitfields_weight::(bitfields.len()); - let disputes_weight = multi_dispute_statement_sets_weight::(&disputes); - let max_block_weight = ::BlockWeights::get().max_block; + let bitfields_weight = signed_bitfields_weight::(&bitfields); + let disputes_weight = multi_dispute_statement_sets_weight::(&disputes); - METRICS - .on_before_filter((candidates_weight + bitfields_weight + disputes_weight).ref_time()); + let all_weight_before = candidates_weight + bitfields_weight + disputes_weight; + + METRICS.on_before_filter(all_weight_before.ref_time()); + log::debug!(target: LOG_TARGET, "Size before filter: {}, candidates + bitfields: {}, disputes: {}", all_weight_before.proof_size(), candidates_weight.proof_size() + bitfields_weight.proof_size(), disputes_weight.proof_size()); + log::debug!(target: LOG_TARGET, 
"Time weight before filter: {}, candidates + bitfields: {}, disputes: {}", all_weight_before.ref_time(), candidates_weight.ref_time() + bitfields_weight.ref_time(), disputes_weight.ref_time()); let current_session = >::session_index(); let expected_bits = >::availability_cores().len(); let validator_public = shared::Pallet::::active_validator_keys(); + // We are assuming (incorrectly) to have all the weight (for the mandatory class or even + // full block) available to us. This can lead to slightly overweight blocks, which still + // works as the dispatch class for `enter` is `Mandatory`. By using the `Mandatory` + // dispatch class, the upper layers impose no limit on the weight of this inherent, instead + // we limit ourselves and make sure to stay within reasonable bounds. It might make sense + // to subtract BlockWeights::base_block to reduce chances of becoming overweight. + let max_block_weight = { + let dispatch_class = DispatchClass::Mandatory; + let max_block_weight_full = ::BlockWeights::get(); + log::debug!(target: LOG_TARGET, "Max block weight: {}", max_block_weight_full.max_block); + // Get max block weight for the mandatory class if defined, otherwise total max weight of + // the block. + let max_weight = max_block_weight_full + .per_class + .get(dispatch_class) + .max_total + .unwrap_or(max_block_weight_full.max_block); + log::debug!(target: LOG_TARGET, "Used max block time weight: {}", max_weight); + + let max_block_size_full = ::BlockLength::get(); + let max_block_size = max_block_size_full.max.get(dispatch_class); + log::debug!(target: LOG_TARGET, "Used max block size: {}", max_block_size); + + // Adjust proof size to max block size as we are tracking tx size. 
+ max_weight.set_proof_size(*max_block_size as u64) + }; + log::debug!(target: LOG_TARGET, "Used max block weight: {}", max_block_weight); + let entropy = compute_entropy::(parent_hash); let mut rng = rand_chacha::ChaChaRng::from_seed(entropy.into()); @@ -388,10 +418,9 @@ impl Pallet { disputes, dispute_statement_set_valid, max_block_weight, - &mut rng, ); - let full_weight = if context == ProcessInherentDataContext::ProvideInherent { + let all_weight_after = if context == ProcessInherentDataContext::ProvideInherent { // Assure the maximum block weight is adhered, by limiting bitfields and backed // candidates. Dispute statement sets were already limited before. let non_disputes_weight = apply_weight_limit::( @@ -401,36 +430,38 @@ impl Pallet { &mut rng, ); - let full_weight = + let all_weight_after = non_disputes_weight.saturating_add(checked_disputes_sets_consumed_weight); - METRICS.on_after_filter(full_weight.ref_time()); + METRICS.on_after_filter(all_weight_after.ref_time()); + log::debug!( + target: LOG_TARGET, + "[process_inherent_data] after filter: bitfields.len(): {}, backed_candidates.len(): {}, checked_disputes_sets.len() {}", + bitfields.len(), + backed_candidates.len(), + checked_disputes_sets.len() + ); + log::debug!(target: LOG_TARGET, "Size after filter: {}, candidates + bitfields: {}, disputes: {}", all_weight_after.proof_size(), non_disputes_weight.proof_size(), checked_disputes_sets_consumed_weight.proof_size()); + log::debug!(target: LOG_TARGET, "Time weight after filter: {}, candidates + bitfields: {}, disputes: {}", all_weight_after.ref_time(), non_disputes_weight.ref_time(), checked_disputes_sets_consumed_weight.ref_time()); - if full_weight.any_gt(max_block_weight) { - log::warn!(target: LOG_TARGET, "Post weight limiting weight is still too large."); + if all_weight_after.any_gt(max_block_weight) { + log::warn!(target: LOG_TARGET, "Post weight limiting weight is still too large, time: {}, size: {}", all_weight_after.ref_time(), 
all_weight_after.proof_size()); } - - full_weight + all_weight_after } else { - // We compute the weight for the unfiltered disputes for a stronger check, since `create_inherent` - // should already have filtered them out in block authorship. - let full_weight = candidates_weight - .saturating_add(bitfields_weight) - .saturating_add(disputes_weight); - // This check is performed in the context of block execution. Ensures inherent weight invariants guaranteed // by `create_inherent_data` for block authorship. - if full_weight.any_gt(max_block_weight) { + if all_weight_before.any_gt(max_block_weight) { log::error!( "Overweight para inherent data reached the runtime {:?}: {} > {}", parent_hash, - full_weight, + all_weight_before, max_block_weight ); } - ensure!(full_weight.all_lte(max_block_weight), Error::::InherentOverweight); - full_weight + ensure!(all_weight_before.all_lte(max_block_weight), Error::::InherentOverweight); + all_weight_before }; // Note that `process_checked_multi_dispute_data` will iterate and import each @@ -597,7 +628,7 @@ impl Pallet { let processed = ParachainsInherentData { bitfields, backed_candidates, disputes, parent_header }; - Ok((processed, Some(full_weight).into())) + Ok((processed, Some(all_weight_after).into())) } } @@ -702,7 +733,7 @@ fn apply_weight_limit( ) -> Weight { let total_candidates_weight = backed_candidates_weight::(candidates.as_slice()); - let total_bitfields_weight = signed_bitfields_weight::(bitfields.len()); + let total_bitfields_weight = signed_bitfields_weight::(&bitfields); let total = total_bitfields_weight.saturating_add(total_candidates_weight); @@ -734,6 +765,7 @@ fn apply_weight_limit( |c| backed_candidate_weight::(c), max_consumable_by_candidates, ); + log::debug!(target: LOG_TARGET, "Indices Candidates: {:?}, size: {}", indices, candidates.len()); candidates.indexed_retain(|idx, _backed_candidate| indices.binary_search(&idx).is_ok()); // pick all bitfields, and // fill the remaining space with candidates @@ 
-750,9 +782,10 @@ fn apply_weight_limit( rng, &bitfields, vec![], - |_| <::WeightInfo as WeightInfo>::enter_bitfields(), + |bitfield| signed_bitfield_weight::(&bitfield), max_consumable_weight, ); + log::debug!(target: LOG_TARGET, "Indices Bitfields: {:?}, size: {}", indices, bitfields.len()); bitfields.indexed_retain(|idx, _bitfield| indices.binary_search(&idx).is_ok()); @@ -941,94 +974,41 @@ fn compute_entropy(parent_hash: T::Hash) -> [u8; 32] { /// 2. If exceeded: /// 1. Check validity of all dispute statements sequentially /// 2. If not exceeded: -/// 1. Sort the disputes based on locality and age, locality first. -/// 1. Split the array -/// 1. Prefer local ones over remote disputes /// 1. If weight is exceeded by locals, pick the older ones (lower indices) /// until the weight limit is reached. -/// 1. If weight is exceeded by locals and remotes, pick remotes -/// randomly and check validity one by one. /// /// Returns the consumed weight amount, that is guaranteed to be less than the provided `max_consumable_weight`. 
fn limit_and_sanitize_disputes< T: Config, CheckValidityFn: FnMut(DisputeStatementSet) -> Option, >( - mut disputes: MultiDisputeStatementSet, + disputes: MultiDisputeStatementSet, mut dispute_statement_set_valid: CheckValidityFn, max_consumable_weight: Weight, - rng: &mut rand_chacha::ChaChaRng, ) -> (Vec, Weight) { // The total weight if all disputes would be included - let disputes_weight = multi_dispute_statement_sets_weight::(&disputes); + let disputes_weight = multi_dispute_statement_sets_weight::(&disputes); if disputes_weight.any_gt(max_consumable_weight) { + log::debug!(target: LOG_TARGET, "Above max consumable weight: {}/{}", disputes_weight, max_consumable_weight); let mut checked_acc = Vec::::with_capacity(disputes.len()); - // Since the disputes array is sorted, we may use binary search to find the beginning of - // remote disputes - let idx = disputes - .binary_search_by(|probe| { - if T::DisputesHandler::included_state(probe.session, probe.candidate_hash).is_some() - { - Ordering::Less - } else { - Ordering::Greater - } - }) - // The above predicate will never find an item and therefore we are guaranteed to obtain - // an error, which we can safely unwrap. QED. - .unwrap_err(); - - // Due to the binary search predicate above, the index computed will constitute the beginning - // of the remote disputes sub-array `[Local, Local, Local, ^Remote, Remote]`. - let remote_disputes = disputes.split_off(idx); - // Accumualated weight of all disputes picked, that passed the checks. 
let mut weight_acc = Weight::zero(); // Select disputes in-order until the remaining weight is attained - disputes.iter().for_each(|dss| { - let dispute_weight = <::WeightInfo as WeightInfo>::enter_variable_disputes( - dss.statements.len() as u32, - ); + disputes.into_iter().for_each(|dss| { + let dispute_weight = dispute_statement_set_weight::(&dss); let updated = weight_acc.saturating_add(dispute_weight); if max_consumable_weight.all_gte(updated) { - // only apply the weight if the validity check passes - if let Some(checked) = dispute_statement_set_valid(dss.clone()) { + // Always apply the weight. Invalid data cost processing time too: + weight_acc = updated; + if let Some(checked) = dispute_statement_set_valid(dss) { checked_acc.push(checked); - weight_acc = updated; } } }); - // Compute the statements length of all remote disputes - let d = remote_disputes.iter().map(|d| d.statements.len() as u32).collect::>(); - - // Select remote disputes at random until the block is full - let (_acc_remote_disputes_weight, mut indices) = random_sel::( - rng, - &d, - vec![], - |v| <::WeightInfo as WeightInfo>::enter_variable_disputes(*v), - max_consumable_weight.saturating_sub(weight_acc), - ); - - // Sort the indices, to retain the same sorting as the input. - indices.sort(); - - // Add the remote disputes after checking their validity. 
- checked_acc.extend(indices.into_iter().filter_map(|idx| { - dispute_statement_set_valid(remote_disputes[idx].clone()).map(|cdss| { - let weight = <::WeightInfo as WeightInfo>::enter_variable_disputes( - cdss.as_ref().statements.len() as u32, - ); - weight_acc = weight_acc.saturating_add(weight); - cdss - }) - })); - - // Update the remaining weight (checked_acc, weight_acc) } else { // Go through all of them, and just apply the filter, they would all fit @@ -1037,7 +1017,7 @@ fn limit_and_sanitize_disputes< .filter_map(|dss| dispute_statement_set_valid(dss)) .collect::>(); // some might have been filtered out, so re-calc the weight - let checked_disputes_weight = multi_dispute_statement_sets_weight::(&checked); + let checked_disputes_weight = checked_multi_dispute_statement_sets_weight::(&checked); (checked, checked_disputes_weight) } } diff --git a/runtime/parachains/src/paras_inherent/tests.rs b/runtime/parachains/src/paras_inherent/tests.rs index 0098814c8aca..c2e80e7525fb 100644 --- a/runtime/parachains/src/paras_inherent/tests.rs +++ b/runtime/parachains/src/paras_inherent/tests.rs @@ -21,13 +21,16 @@ use super::*; // weights for limiting data will fail, so we don't run them when using the benchmark feature. 
#[cfg(not(feature = "runtime-benchmarks"))] mod enter { + use super::*; use crate::{ builder::{Bench, BenchBuilder}, - mock::{new_test_ext, MockGenesisConfig, Test}, + mock::{new_test_ext, BlockLength, BlockWeights, MockGenesisConfig, Test}, }; use assert_matches::assert_matches; use frame_support::assert_ok; + use frame_system::limits; + use sp_runtime::Perbill; use sp_std::collections::btree_map::BTreeMap; struct TestConfig { @@ -300,6 +303,7 @@ mod enter { // Ensure that when dispute data establishes an over weight block that we adequately // filter out disputes according to our prioritization rule fn limit_dispute_data() { + sp_tracing::try_init_simple(); new_test_ext(MockGenesisConfig::default()).execute_with(|| { // Create the inherent data for this block let dispute_statements = BTreeMap::new(); @@ -486,7 +490,8 @@ mod enter { assert_ne!(limit_inherent_data, expected_para_inherent_data); assert!(inherent_data_weight(&limit_inherent_data) .all_lte(inherent_data_weight(&expected_para_inherent_data))); - assert!(inherent_data_weight(&limit_inherent_data).all_lte(max_block_weight())); + assert!(inherent_data_weight(&limit_inherent_data) + .all_lte(max_block_weight_proof_size_adjusted())); // Three disputes is over weight (see previous test), so we expect to only see 2 disputes assert_eq!(limit_inherent_data.disputes.len(), 2); @@ -565,17 +570,18 @@ mod enter { }); } - fn max_block_weight() -> Weight { - ::BlockWeights::get().max_block + fn max_block_weight_proof_size_adjusted() -> Weight { + let raw_weight = ::BlockWeights::get().max_block; + let block_length = ::BlockLength::get(); + raw_weight.set_proof_size(*block_length.max.get(DispatchClass::Mandatory) as u64) } fn inherent_data_weight(inherent_data: &ParachainsInherentData) -> Weight { use thousands::Separable; let multi_dispute_statement_sets_weight = - multi_dispute_statement_sets_weight::(&inherent_data.disputes); - let signed_bitfields_weight = - 
signed_bitfields_weight::(inherent_data.bitfields.len()); + multi_dispute_statement_sets_weight::(&inherent_data.disputes); + let signed_bitfields_weight = signed_bitfields_weight::(&inherent_data.bitfields); let backed_candidates_weight = backed_candidates_weight::(&inherent_data.backed_candidates); @@ -622,7 +628,8 @@ mod enter { }); let expected_para_inherent_data = scenario.data.clone(); - assert!(max_block_weight().any_lt(inherent_data_weight(&expected_para_inherent_data))); + assert!(max_block_weight_proof_size_adjusted() + .any_lt(inherent_data_weight(&expected_para_inherent_data))); // Check the para inherent data is as expected: // * 1 bitfield per validator (5 validators per core, 2 backed candidates, 3 disputes => 5*5 = 25) @@ -641,9 +648,10 @@ mod enter { // Expect that inherent data is filtered to include only 1 backed candidate and 2 disputes assert!(limit_inherent_data != expected_para_inherent_data); assert!( - max_block_weight().all_gte(inherent_data_weight(&limit_inherent_data)), + max_block_weight_proof_size_adjusted() + .all_gte(inherent_data_weight(&limit_inherent_data)), "Post limiting exceeded block weight: max={} vs. 
inherent={}", - max_block_weight(), + max_block_weight_proof_size_adjusted(), inherent_data_weight(&limit_inherent_data) ); @@ -675,8 +683,199 @@ mod enter { } #[test] + fn disputes_are_size_limited() { + BlockLength::set(limits::BlockLength::max_with_normal_ratio( + 600, + Perbill::from_percent(75), + )); + // Virtually no time based limit: + BlockWeights::set(frame_system::limits::BlockWeights::simple_max(Weight::from_parts( + u64::MAX, + u64::MAX, + ))); + new_test_ext(MockGenesisConfig::default()).execute_with(|| { + // Create the inherent data for this block + let mut dispute_statements = BTreeMap::new(); + dispute_statements.insert(2, 7); + dispute_statements.insert(3, 7); + dispute_statements.insert(4, 7); + + let backed_and_concluding = BTreeMap::new(); + + let scenario = make_inherent_data(TestConfig { + dispute_statements, + dispute_sessions: vec![2, 2, 1], // 3 cores with disputes + backed_and_concluding, + num_validators_per_core: 5, + code_upgrade: None, + }); + + let expected_para_inherent_data = scenario.data.clone(); + assert!(max_block_weight_proof_size_adjusted() + .any_lt(inherent_data_weight(&expected_para_inherent_data))); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (5 validators per core, 3 disputes => 3*5 = 15) + assert_eq!(expected_para_inherent_data.bitfields.len(), 15); + // * 2 backed candidates + assert_eq!(expected_para_inherent_data.backed_candidates.len(), 0); + // * 3 disputes. 
+ assert_eq!(expected_para_inherent_data.disputes.len(), 3); + let mut inherent_data = InherentData::new(); + inherent_data + .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) + .unwrap(); + let limit_inherent_data = + Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); + // Expect that inherent data is filtered to include only 1 backed candidate and 2 disputes + assert!(limit_inherent_data != expected_para_inherent_data); + assert!( + max_block_weight_proof_size_adjusted() + .all_gte(inherent_data_weight(&limit_inherent_data)), + "Post limiting exceeded block weight: max={} vs. inherent={}", + max_block_weight_proof_size_adjusted(), + inherent_data_weight(&limit_inherent_data) + ); + + // * 1 bitfields - gone + assert_eq!(limit_inherent_data.bitfields.len(), 0); + // * 2 backed candidates - still none. + assert_eq!(limit_inherent_data.backed_candidates.len(), 0); + // * 3 disputes - filtered. + assert_eq!(limit_inherent_data.disputes.len(), 1); + }); + } + + #[test] + fn bitfields_are_size_limited() { + BlockLength::set(limits::BlockLength::max_with_normal_ratio( + 600, + Perbill::from_percent(75), + )); + // Virtually no time based limit: + BlockWeights::set(frame_system::limits::BlockWeights::simple_max(Weight::from_parts( + u64::MAX, + u64::MAX, + ))); + new_test_ext(MockGenesisConfig::default()).execute_with(|| { + // Create the inherent data for this block + let dispute_statements = BTreeMap::new(); + + let mut backed_and_concluding = BTreeMap::new(); + // 2 backed candidates shall be scheduled + backed_and_concluding.insert(0, 2); + backed_and_concluding.insert(1, 2); + + let scenario = make_inherent_data(TestConfig { + dispute_statements, + dispute_sessions: Vec::new(), + backed_and_concluding, + num_validators_per_core: 5, + code_upgrade: None, + }); + + let expected_para_inherent_data = scenario.data.clone(); + assert!(max_block_weight_proof_size_adjusted() + .any_lt(inherent_data_weight(&expected_para_inherent_data))); 
+ + // Check the para inherent data is as expected: + // * 1 bitfield per validator (5 validators per core, 2 backed candidates => 2*5 = 10) + assert_eq!(expected_para_inherent_data.bitfields.len(), 10); + // * 2 backed candidates + assert_eq!(expected_para_inherent_data.backed_candidates.len(), 2); + // * 3 disputes. + assert_eq!(expected_para_inherent_data.disputes.len(), 0); + let mut inherent_data = InherentData::new(); + inherent_data + .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) + .unwrap(); + + let limit_inherent_data = + Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); + // Expect that inherent data is filtered to include only 1 backed candidate and 2 disputes + assert!(limit_inherent_data != expected_para_inherent_data); + assert!( + max_block_weight_proof_size_adjusted() + .all_gte(inherent_data_weight(&limit_inherent_data)), + "Post limiting exceeded block weight: max={} vs. inherent={}", + max_block_weight_proof_size_adjusted(), + inherent_data_weight(&limit_inherent_data) + ); + + // * 1 bitfields have been filtered + assert_eq!(limit_inherent_data.bitfields.len(), 8); + // * 2 backed candidates have been filtered as well (not even space for bitfields) + assert_eq!(limit_inherent_data.backed_candidates.len(), 0); + // * 3 disputes. Still none. 
+ assert_eq!(limit_inherent_data.disputes.len(), 0); + }); + } + + #[test] + fn candidates_are_size_limited() { + BlockLength::set(limits::BlockLength::max_with_normal_ratio( + 1_300, + Perbill::from_percent(75), + )); + // Virtually no time based limit: + BlockWeights::set(frame_system::limits::BlockWeights::simple_max(Weight::from_parts( + u64::MAX, + u64::MAX, + ))); + new_test_ext(MockGenesisConfig::default()).execute_with(|| { + let mut backed_and_concluding = BTreeMap::new(); + // 2 backed candidates shall be scheduled + backed_and_concluding.insert(0, 2); + backed_and_concluding.insert(1, 2); + + let scenario = make_inherent_data(TestConfig { + dispute_statements: BTreeMap::new(), + dispute_sessions: Vec::new(), + backed_and_concluding, + num_validators_per_core: 5, + code_upgrade: None, + }); + + let expected_para_inherent_data = scenario.data.clone(); + assert!(max_block_weight_proof_size_adjusted() + .any_lt(inherent_data_weight(&expected_para_inherent_data))); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (5 validators per core, 2 backed candidates, 0 disputes => 2*5 = 10) + assert_eq!(expected_para_inherent_data.bitfields.len(), 10); + // * 2 backed candidates + assert_eq!(expected_para_inherent_data.backed_candidates.len(), 2); + // * 0 disputes. + assert_eq!(expected_para_inherent_data.disputes.len(), 0); + let mut inherent_data = InherentData::new(); + inherent_data + .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) + .unwrap(); + + let limit_inherent_data = + Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); + // Expect that inherent data is filtered to include only 1 backed candidate and 2 disputes + assert!(limit_inherent_data != expected_para_inherent_data); + assert!( + max_block_weight_proof_size_adjusted() + .all_gte(inherent_data_weight(&limit_inherent_data)), + "Post limiting exceeded block weight: max={} vs. 
inherent={}", + max_block_weight_proof_size_adjusted(), + inherent_data_weight(&limit_inherent_data) + ); + + // * 1 bitfields - no filtering here + assert_eq!(limit_inherent_data.bitfields.len(), 10); + // * 2 backed candidates + assert_eq!(limit_inherent_data.backed_candidates.len(), 1); + // * 0 disputes. + assert_eq!(limit_inherent_data.disputes.len(), 0); + }); + } + // Ensure that overweight parachain inherents are always rejected by the runtime. // Runtime should panic and return `InherentOverweight` error. + #[test] fn inherent_create_weight_invariant() { new_test_ext(MockGenesisConfig::default()).execute_with(|| { // Create an overweight inherent and oversized block @@ -700,7 +899,8 @@ mod enter { }); let expected_para_inherent_data = scenario.data.clone(); - assert!(max_block_weight().any_lt(inherent_data_weight(&expected_para_inherent_data))); + assert!(max_block_weight_proof_size_adjusted() + .any_lt(inherent_data_weight(&expected_para_inherent_data))); // Check the para inherent data is as expected: // * 1 bitfield per validator (5 validators per core, 30 backed candidates, 3 disputes => 5*33 = 165) @@ -713,7 +913,6 @@ mod enter { inherent_data .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) .unwrap(); - let dispatch_error = Pallet::::enter( frame_system::RawOrigin::None.into(), expected_para_inherent_data, @@ -724,8 +923,6 @@ mod enter { assert_eq!(dispatch_error, Error::::InherentOverweight.into()); }); } - - // TODO: Test process inherent invariant } fn default_header() -> primitives::Header { diff --git a/runtime/parachains/src/paras_inherent/weights.rs b/runtime/parachains/src/paras_inherent/weights.rs index f6e1262f5eb9..05cc53fae046 100644 --- a/runtime/parachains/src/paras_inherent/weights.rs +++ b/runtime/parachains/src/paras_inherent/weights.rs @@ -13,10 +13,20 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
-use super::{ - BackedCandidate, Config, DisputeStatementSet, UncheckedSignedAvailabilityBitfield, Weight, + +//! We use benchmarks to get time weights, for proof_size we manually use the size of the input +//! data, which will be part of the block. This is because we don't care about the storage proof on +//! the relay chain, but we do care about the size of the block, by putting the tx in the +//! proof_size we can use the already existing weight limiting code to limit the used size as well. + +use parity_scale_codec::{Encode, WrapperTypeEncode}; +use primitives::{ + CheckedMultiDisputeStatementSet, MultiDisputeStatementSet, UncheckedSignedAvailabilityBitfield, + UncheckedSignedAvailabilityBitfields, }; +use super::{BackedCandidate, Config, DisputeStatementSet, Weight}; + pub trait WeightInfo { /// Variant over `v`, the count of dispute statements in a dispute statement set. This gives the /// weight of a single dispute statement set. @@ -72,51 +82,82 @@ impl WeightInfo for TestWeightInfo { pub fn paras_inherent_total_weight( backed_candidates: &[BackedCandidate<::Hash>], - bitfields: &[UncheckedSignedAvailabilityBitfield], - disputes: &[DisputeStatementSet], + bitfields: &UncheckedSignedAvailabilityBitfields, + disputes: &MultiDisputeStatementSet, ) -> Weight { backed_candidates_weight::(backed_candidates) - .saturating_add(signed_bitfields_weight::(bitfields.len())) - .saturating_add(multi_dispute_statement_sets_weight::(disputes)) + .saturating_add(signed_bitfields_weight::(bitfields)) + .saturating_add(multi_dispute_statement_sets_weight::(disputes)) } -pub fn dispute_statement_set_weight>( - statement_set: S, +pub fn multi_dispute_statement_sets_weight( + disputes: &MultiDisputeStatementSet, ) -> Weight { - <::WeightInfo as WeightInfo>::enter_variable_disputes( - statement_set.as_ref().statements.len() as u32, + set_proof_size_to_tx_size( + disputes + .iter() + .map(|d| dispute_statement_set_weight::(d)) + .fold(Weight::zero(), |acc_weight, weight| 
acc_weight.saturating_add(weight)), + disputes, ) } -pub fn multi_dispute_statement_sets_weight< +pub fn checked_multi_dispute_statement_sets_weight( + disputes: &CheckedMultiDisputeStatementSet, +) -> Weight { + set_proof_size_to_tx_size( + disputes + .iter() + .map(|d| dispute_statement_set_weight::(d)) + .fold(Weight::zero(), |acc_weight, weight| acc_weight.saturating_add(weight)), + disputes, + ) +} + +/// Get time weights from benchmarks and set proof size to tx size. +pub fn dispute_statement_set_weight(statement_set: D) -> Weight +where T: Config, - D: AsRef<[S]>, - S: AsRef, ->( - disputes: D, + D: AsRef + WrapperTypeEncode + Sized + Encode, +{ + set_proof_size_to_tx_size( + <::WeightInfo as WeightInfo>::enter_variable_disputes( + statement_set.as_ref().statements.len() as u32, + ), + statement_set, + ) +} + +pub fn signed_bitfields_weight( + bitfields: &UncheckedSignedAvailabilityBitfields, ) -> Weight { - disputes - .as_ref() - .iter() - .map(|d| dispute_statement_set_weight::(d)) - .fold(Weight::zero(), |acc_weight, weight| acc_weight.saturating_add(weight)) + set_proof_size_to_tx_size( + <::WeightInfo as WeightInfo>::enter_bitfields() + .saturating_mul(bitfields.len() as u64), + bitfields, + ) } -pub fn signed_bitfields_weight(bitfields_len: usize) -> Weight { - <::WeightInfo as WeightInfo>::enter_bitfields() - .saturating_mul(bitfields_len as u64) +pub fn signed_bitfield_weight(bitfield: &UncheckedSignedAvailabilityBitfield) -> Weight { + set_proof_size_to_tx_size( + <::WeightInfo as WeightInfo>::enter_bitfields(), + bitfield, + ) } pub fn backed_candidate_weight( candidate: &BackedCandidate, ) -> Weight { - if candidate.candidate.commitments.new_validation_code.is_some() { - <::WeightInfo as WeightInfo>::enter_backed_candidate_code_upgrade() - } else { - <::WeightInfo as WeightInfo>::enter_backed_candidates_variable( - candidate.validity_votes.len() as u32, - ) - } + set_proof_size_to_tx_size( + if 
candidate.candidate.commitments.new_validation_code.is_some() { + <::WeightInfo as WeightInfo>::enter_backed_candidate_code_upgrade() + } else { + <::WeightInfo as WeightInfo>::enter_backed_candidates_variable( + candidate.validity_votes.len() as u32, + ) + }, + candidate, + ) } pub fn backed_candidates_weight( @@ -127,3 +168,8 @@ pub fn backed_candidates_weight( .map(|c| backed_candidate_weight::(c)) .fold(Weight::zero(), |acc, x| acc.saturating_add(x)) } + +/// Set proof_size component of `Weight` to tx size. +fn set_proof_size_to_tx_size(weight: Weight, arg: Arg) -> Weight { + weight.set_proof_size(arg.encoded_size() as u64) +} diff --git a/runtime/polkadot/src/weights/runtime_parachains_paras_inherent.rs b/runtime/polkadot/src/weights/runtime_parachains_paras_inherent.rs index ae1c502ae921..70eb764305e4 100644 --- a/runtime/polkadot/src/weights/runtime_parachains_paras_inherent.rs +++ b/runtime/polkadot/src/weights/runtime_parachains_paras_inherent.rs @@ -17,27 +17,26 @@ //! Autogenerated weights for `runtime_parachains::paras_inherent` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot +// target/production/polkadot // benchmark // pallet -// --chain=polkadot-dev // --steps=50 // --repeat=20 -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --pallet=runtime_parachains::paras_inherent // --extrinsic=* // --execution=wasm // --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=runtime_parachains::paras_inherent +// --chain=polkadot-dev // --header=./file_header.txt -// --output=./runtime/polkadot/src/weights/runtime_parachains_paras_inherent.rs +// --output=./runtime/polkadot/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -117,11 +116,11 @@ impl runtime_parachains::paras_inherent::WeightInfo for // Proof Size summary in bytes: // Measured: `50915` // Estimated: `56855 + v * (23 ±0)` - // Minimum execution time: 999_775_000 picoseconds. - Weight::from_parts(461_856_558, 0) + // Minimum execution time: 999_704_000 picoseconds. + Weight::from_parts(455_751_887, 0) .saturating_add(Weight::from_parts(0, 56855)) - // Standard Error: 15_669 - .saturating_add(Weight::from_parts(56_847_986, 0).saturating_mul(v.into())) + // Standard Error: 14_301 + .saturating_add(Weight::from_parts(57_084_663, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(28)) .saturating_add(T::DbWeight::get().writes(15)) .saturating_add(Weight::from_parts(0, 23).saturating_mul(v.into())) @@ -190,8 +189,8 @@ impl runtime_parachains::paras_inherent::WeightInfo for // Proof Size summary in bytes: // Measured: `42748` // Estimated: `48688` - // Minimum execution time: 457_800_000 picoseconds. - Weight::from_parts(482_446_000, 0) + // Minimum execution time: 485_153_000 picoseconds. 
+ Weight::from_parts(504_774_000, 0) .saturating_add(Weight::from_parts(0, 48688)) .saturating_add(T::DbWeight::get().reads(26)) .saturating_add(T::DbWeight::get().writes(16)) @@ -265,11 +264,11 @@ impl runtime_parachains::paras_inherent::WeightInfo for // Proof Size summary in bytes: // Measured: `42784` // Estimated: `48724` - // Minimum execution time: 6_889_257_000 picoseconds. - Weight::from_parts(1_240_166_857, 0) + // Minimum execution time: 6_906_795_000 picoseconds. + Weight::from_parts(1_315_944_667, 0) .saturating_add(Weight::from_parts(0, 48724)) - // Standard Error: 23_642 - .saturating_add(Weight::from_parts(56_311_928, 0).saturating_mul(v.into())) + // Standard Error: 31_132 + .saturating_add(Weight::from_parts(55_792_755, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(29)) .saturating_add(T::DbWeight::get().writes(15)) } @@ -345,8 +344,8 @@ impl runtime_parachains::paras_inherent::WeightInfo for // Proof Size summary in bytes: // Measured: `42811` // Estimated: `48751` - // Minimum execution time: 41_983_250_000 picoseconds. - Weight::from_parts(43_216_188_000, 0) + // Minimum execution time: 44_487_810_000 picoseconds. + Weight::from_parts(46_317_208_000, 0) .saturating_add(Weight::from_parts(0, 48751)) .saturating_add(T::DbWeight::get().reads(31)) .saturating_add(T::DbWeight::get().writes(15)) diff --git a/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs b/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs index 72f70f8c4205..0dd64f054d00 100644 --- a/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs +++ b/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs @@ -17,27 +17,26 @@ //! Autogenerated weights for `runtime_parachains::paras_inherent` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
DATE: 2023-06-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner--ss9ysm1-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot +// target/production/polkadot // benchmark // pallet -// --chain=westend-dev // --steps=50 // --repeat=20 -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --pallet=runtime_parachains::paras_inherent // --extrinsic=* // --execution=wasm // --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=runtime_parachains::paras_inherent +// --chain=westend-dev // --header=./file_header.txt -// --output=./runtime/westend/src/weights/runtime_parachains_paras_inherent.rs +// --output=./runtime/westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -115,11 +114,11 @@ impl runtime_parachains::paras_inherent::WeightInfo for // Proof Size summary in bytes: // Measured: `50518` // Estimated: `56458 + v * (23 ±0)` - // Minimum execution time: 992_257_000 picoseconds. - Weight::from_parts(473_224_562, 0) + // Minimum execution time: 998_338_000 picoseconds. 
+ Weight::from_parts(468_412_001, 0) .saturating_add(Weight::from_parts(0, 56458)) - // Standard Error: 50_055 - .saturating_add(Weight::from_parts(57_274_046, 0).saturating_mul(v.into())) + // Standard Error: 20_559 + .saturating_add(Weight::from_parts(56_965_025, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(27)) .saturating_add(T::DbWeight::get().writes(15)) .saturating_add(Weight::from_parts(0, 23).saturating_mul(v.into())) @@ -186,8 +185,8 @@ impl runtime_parachains::paras_inherent::WeightInfo for // Proof Size summary in bytes: // Measured: `42352` // Estimated: `48292` - // Minimum execution time: 445_965_000 picoseconds. - Weight::from_parts(476_329_000, 0) + // Minimum execution time: 457_404_000 picoseconds. + Weight::from_parts(485_416_000, 0) .saturating_add(Weight::from_parts(0, 48292)) .saturating_add(T::DbWeight::get().reads(25)) .saturating_add(T::DbWeight::get().writes(16)) @@ -259,11 +258,11 @@ impl runtime_parachains::paras_inherent::WeightInfo for // Proof Size summary in bytes: // Measured: `42387` // Estimated: `48327` - // Minimum execution time: 6_877_099_000 picoseconds. - Weight::from_parts(1_267_644_471, 0) + // Minimum execution time: 6_864_029_000 picoseconds. + Weight::from_parts(1_237_704_892, 0) .saturating_add(Weight::from_parts(0, 48327)) - // Standard Error: 96_443 - .saturating_add(Weight::from_parts(56_535_707, 0).saturating_mul(v.into())) + // Standard Error: 33_413 + .saturating_add(Weight::from_parts(56_199_819, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(28)) .saturating_add(T::DbWeight::get().writes(15)) } @@ -337,8 +336,8 @@ impl runtime_parachains::paras_inherent::WeightInfo for // Proof Size summary in bytes: // Measured: `42414` // Estimated: `48354` - // Minimum execution time: 40_882_969_000 picoseconds. - Weight::from_parts(45_409_238_000, 0) + // Minimum execution time: 43_320_529_000 picoseconds. 
+ Weight::from_parts(45_622_613_000, 0) .saturating_add(Weight::from_parts(0, 48354)) .saturating_add(T::DbWeight::get().reads(30)) .saturating_add(T::DbWeight::get().writes(15)) From 70aed93c93e74c17e490ef8be40c7a4258b0165e Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Wed, 2 Aug 2023 16:37:12 +0300 Subject: [PATCH 11/35] [companion] Get rid of `Peerset` compatibility layer (#7355) * Update `NetworkPeers` trait interface * update lockfile for {"substrate"} --------- Co-authored-by: parity-processbot <> --- Cargo.lock | 370 +++++++++--------- node/network/bridge/src/network.rs | 14 +- node/network/bridge/src/rx/tests.rs | 8 +- node/network/bridge/src/tx/tests.rs | 8 +- .../network/bridge/src/validator_discovery.rs | 7 +- 5 files changed, 217 insertions(+), 190 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1037808b8e94..2087d3f597d4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -438,7 +438,7 @@ dependencies = [ [[package]] name = "binary-merkle-tree" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "hash-db", "log", @@ -2223,7 +2223,7 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "parity-scale-codec", ] @@ -2246,7 +2246,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frame-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = 
"git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-support", "frame-support-procedural", @@ -2271,7 +2271,7 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "Inflector", "array-bytes", @@ -2319,7 +2319,7 @@ dependencies = [ [[package]] name = "frame-election-provider-solution-type" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2330,7 +2330,7 @@ dependencies = [ [[package]] name = "frame-election-provider-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-election-provider-solution-type", "frame-support", @@ -2347,7 +2347,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-support", "frame-system", @@ -2376,7 +2376,7 @@ dependencies = [ [[package]] name = "frame-remote-externalities" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = 
"git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "async-recursion", "futures", @@ -2397,7 +2397,7 @@ dependencies = [ [[package]] name = "frame-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "aquamarine", "bitflags", @@ -2434,7 +2434,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "Inflector", "cfg-expr", @@ -2452,7 +2452,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", @@ -2464,7 +2464,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "proc-macro2", "quote", @@ -2474,7 +2474,7 @@ dependencies = [ [[package]] name = "frame-support-test" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" 
dependencies = [ "frame-benchmarking", "frame-executive", @@ -2501,7 +2501,7 @@ dependencies = [ [[package]] name = "frame-support-test-pallet" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-support", "frame-system", @@ -2514,7 +2514,7 @@ dependencies = [ [[package]] name = "frame-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "cfg-if", "frame-support", @@ -2533,7 +2533,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -2548,7 +2548,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "parity-scale-codec", "sp-api", @@ -2557,7 +2557,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-support", "parity-scale-codec", @@ -2739,7 +2739,7 @@ dependencies = [ [[package]] 
name = "generate-bags" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "chrono", "frame-election-provider-support", @@ -4594,7 +4594,7 @@ dependencies = [ [[package]] name = "mmr-gadget" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "futures", "log", @@ -4613,7 +4613,7 @@ dependencies = [ [[package]] name = "mmr-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "anyhow", "jsonrpsee", @@ -5139,7 +5139,7 @@ dependencies = [ [[package]] name = "pallet-assets" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5154,7 +5154,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-support", "frame-system", @@ -5170,7 +5170,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" 
+source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-support", "frame-system", @@ -5184,7 +5184,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5208,7 +5208,7 @@ dependencies = [ [[package]] name = "pallet-bags-list" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5228,7 +5228,7 @@ dependencies = [ [[package]] name = "pallet-bags-list-remote-tests" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-election-provider-support", "frame-remote-externalities", @@ -5247,7 +5247,7 @@ dependencies = [ [[package]] name = "pallet-balances" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5262,7 +5262,7 @@ dependencies = [ [[package]] name = "pallet-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = 
"git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-support", "frame-system", @@ -5281,7 +5281,7 @@ dependencies = [ [[package]] name = "pallet-beefy-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "array-bytes", "binary-merkle-tree", @@ -5305,7 +5305,7 @@ dependencies = [ [[package]] name = "pallet-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5323,7 +5323,7 @@ dependencies = [ [[package]] name = "pallet-child-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5342,7 +5342,7 @@ dependencies = [ [[package]] name = "pallet-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5359,7 +5359,7 @@ dependencies = [ [[package]] name = "pallet-conviction-voting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" 
dependencies = [ "assert_matches", "frame-benchmarking", @@ -5376,7 +5376,7 @@ dependencies = [ [[package]] name = "pallet-democracy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5394,7 +5394,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-multi-phase" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5417,7 +5417,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-support-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5430,7 +5430,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5449,7 +5449,7 @@ dependencies = [ [[package]] name = "pallet-fast-unstake" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ 
"docify", "frame-benchmarking", @@ -5468,7 +5468,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5491,7 +5491,7 @@ dependencies = [ [[package]] name = "pallet-identity" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "enumflags2", "frame-benchmarking", @@ -5507,7 +5507,7 @@ dependencies = [ [[package]] name = "pallet-im-online" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5527,7 +5527,7 @@ dependencies = [ [[package]] name = "pallet-indices" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5544,7 +5544,7 @@ dependencies = [ [[package]] name = "pallet-membership" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5561,7 +5561,7 @@ dependencies = [ [[package]] name = "pallet-message-queue" version 
= "7.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5580,7 +5580,7 @@ dependencies = [ [[package]] name = "pallet-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5597,7 +5597,7 @@ dependencies = [ [[package]] name = "pallet-multisig" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5613,7 +5613,7 @@ dependencies = [ [[package]] name = "pallet-nis" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5629,11 +5629,12 @@ dependencies = [ [[package]] name = "pallet-nomination-pools" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-support", "frame-system", "log", + "pallet-balances", "parity-scale-codec", "scale-info", "sp-core", @@ -5641,12 +5642,13 @@ dependencies = [ "sp-runtime", "sp-staking", "sp-std", + "sp-tracing", ] [[package]] name = 
"pallet-nomination-pools-benchmarking" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5666,7 +5668,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-runtime-api" version = "1.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "pallet-nomination-pools", "parity-scale-codec", @@ -5677,7 +5679,7 @@ dependencies = [ [[package]] name = "pallet-offences" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-support", "frame-system", @@ -5694,7 +5696,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5718,7 +5720,7 @@ dependencies = [ [[package]] name = "pallet-preimage" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5735,7 +5737,7 @@ dependencies = [ [[package]] name = "pallet-proxy" version = 
"4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5750,7 +5752,7 @@ dependencies = [ [[package]] name = "pallet-ranked-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5768,7 +5770,7 @@ dependencies = [ [[package]] name = "pallet-recovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5783,7 +5785,7 @@ dependencies = [ [[package]] name = "pallet-referenda" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "assert_matches", "frame-benchmarking", @@ -5802,7 +5804,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5819,7 +5821,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-support", "frame-system", @@ -5840,7 +5842,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5856,7 +5858,7 @@ dependencies = [ [[package]] name = "pallet-society" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5875,7 +5877,7 @@ dependencies = [ [[package]] name = "pallet-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5898,7 +5900,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -5909,7 +5911,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-fn" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "log", "sp-arithmetic", @@ -5918,7 +5920,7 @@ dependencies = [ [[package]] name = "pallet-staking-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "parity-scale-codec", "sp-api", @@ -5927,7 +5929,7 @@ dependencies = [ [[package]] name = "pallet-state-trie-migration" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5944,7 +5946,7 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5959,7 +5961,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5977,7 +5979,7 @@ dependencies = [ [[package]] name = "pallet-tips" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = 
"git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5996,7 +5998,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-support", "frame-system", @@ -6012,7 +6014,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", @@ -6028,7 +6030,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -6040,7 +6042,7 @@ dependencies = [ [[package]] name = "pallet-treasury" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6057,7 +6059,7 @@ dependencies = [ [[package]] name = "pallet-uniques" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = 
"git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6072,7 +6074,7 @@ dependencies = [ [[package]] name = "pallet-utility" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6088,7 +6090,7 @@ dependencies = [ [[package]] name = "pallet-vesting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6103,7 +6105,7 @@ dependencies = [ [[package]] name = "pallet-whitelist" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-benchmarking", "frame-support", @@ -9113,7 +9115,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "log", "sp-core", @@ -9124,7 +9126,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "async-trait", 
"futures", @@ -9152,7 +9154,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "futures", "futures-timer", @@ -9175,7 +9177,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -9190,7 +9192,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "memmap2", "sc-chain-spec-derive", @@ -9209,7 +9211,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -9220,7 +9222,7 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "array-bytes", "chrono", @@ -9259,7 +9261,7 @@ dependencies = [ [[package]] name = "sc-client-api" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "fnv", "futures", @@ -9285,7 +9287,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "hash-db", "kvdb", @@ -9311,7 +9313,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "async-trait", "futures", @@ -9336,7 +9338,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "async-trait", "fork-tree", @@ -9372,7 +9374,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "futures", "jsonrpsee", @@ -9394,7 +9396,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = 
"git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "array-bytes", "async-channel", @@ -9428,7 +9430,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "futures", "jsonrpsee", @@ -9447,7 +9449,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "fork-tree", "parity-scale-codec", @@ -9460,7 +9462,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "ahash 0.8.2", "array-bytes", @@ -9501,7 +9503,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "finality-grandpa", "futures", @@ -9521,7 +9523,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ 
"async-trait", "futures", @@ -9544,7 +9546,7 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", @@ -9566,7 +9568,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "sc-allocator", "sp-maybe-compressed-blob", @@ -9578,7 +9580,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "anyhow", "cfg-if", @@ -9595,7 +9597,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "ansi_term", "futures", @@ -9611,7 +9613,7 @@ dependencies = [ [[package]] name = "sc-keystore" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "array-bytes", "parking_lot 0.12.1", @@ -9625,7 +9627,7 @@ dependencies = [ [[package]] name = "sc-network" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "array-bytes", "async-channel", @@ -9668,7 +9670,7 @@ dependencies = [ [[package]] name = "sc-network-bitswap" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "async-channel", "cid", @@ -9688,7 +9690,7 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "async-trait", "bitflags", @@ -9705,7 +9707,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "ahash 0.8.2", "futures", @@ -9724,7 +9726,7 @@ dependencies = [ [[package]] name = "sc-network-light" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "array-bytes", "async-channel", @@ -9745,7 +9747,7 @@ dependencies = [ [[package]] name = "sc-network-sync" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = 
"git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "array-bytes", "async-channel", @@ -9779,7 +9781,7 @@ dependencies = [ [[package]] name = "sc-network-transactions" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "array-bytes", "futures", @@ -9797,7 +9799,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "array-bytes", "bytes", @@ -9831,7 +9833,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -9840,7 +9842,7 @@ dependencies = [ [[package]] name = "sc-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "futures", "jsonrpsee", @@ -9871,7 +9873,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -9890,7 +9892,7 
@@ dependencies = [ [[package]] name = "sc-rpc-server" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "http", "jsonrpsee", @@ -9905,7 +9907,7 @@ dependencies = [ [[package]] name = "sc-rpc-spec-v2" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "array-bytes", "futures", @@ -9931,7 +9933,7 @@ dependencies = [ [[package]] name = "sc-service" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "async-trait", "directories", @@ -9995,7 +9997,7 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "log", "parity-scale-codec", @@ -10006,7 +10008,7 @@ dependencies = [ [[package]] name = "sc-storage-monitor" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "clap 4.2.5", "fs4", @@ -10020,7 +10022,7 @@ dependencies = [ [[package]] name = "sc-sync-state-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" 
+source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -10039,7 +10041,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "6.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "futures", "libc", @@ -10058,7 +10060,7 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "chrono", "futures", @@ -10077,7 +10079,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "ansi_term", "atty", @@ -10106,7 +10108,7 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -10117,7 +10119,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "async-trait", "futures", @@ -10143,7 +10145,7 
@@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "async-trait", "futures", @@ -10159,7 +10161,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "async-channel", "futures", @@ -10707,7 +10709,7 @@ dependencies = [ [[package]] name = "sp-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "hash-db", "log", @@ -10728,7 +10730,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "Inflector", "blake2", @@ -10742,7 +10744,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" version = "23.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "parity-scale-codec", "scale-info", @@ -10755,7 +10757,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "16.0.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "integer-sqrt", "num-traits", @@ -10769,7 +10771,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "parity-scale-codec", "scale-info", @@ -10782,7 +10784,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "sp-api", "sp-inherents", @@ -10793,7 +10795,7 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "futures", "log", @@ -10811,7 +10813,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "async-trait", "futures", @@ -10826,7 +10828,7 @@ dependencies = [ [[package]] name = "sp-consensus-aura" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = 
"git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "async-trait", "parity-scale-codec", @@ -10843,7 +10845,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "async-trait", "parity-scale-codec", @@ -10862,7 +10864,7 @@ dependencies = [ [[package]] name = "sp-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "lazy_static", "parity-scale-codec", @@ -10881,7 +10883,7 @@ dependencies = [ [[package]] name = "sp-consensus-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "finality-grandpa", "log", @@ -10899,7 +10901,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "parity-scale-codec", "scale-info", @@ -10911,7 +10913,7 @@ dependencies = [ [[package]] name = "sp-core" version = "21.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ 
"array-bytes", "bitflags", @@ -10956,7 +10958,7 @@ dependencies = [ [[package]] name = "sp-core-hashing" version = "9.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "blake2b_simd", "byteorder", @@ -10969,7 +10971,7 @@ dependencies = [ [[package]] name = "sp-core-hashing-proc-macro" version = "9.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "quote", "sp-core-hashing", @@ -10979,7 +10981,7 @@ dependencies = [ [[package]] name = "sp-database" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "kvdb", "parking_lot 0.12.1", @@ -10988,7 +10990,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "proc-macro2", "quote", @@ -10998,7 +11000,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.19.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "environmental", "parity-scale-codec", @@ -11009,7 +11011,7 @@ dependencies = [ [[package]] name = "sp-genesis-builder" version = "0.1.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "serde_json", "sp-api", @@ -11020,7 +11022,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -11034,7 +11036,7 @@ dependencies = [ [[package]] name = "sp-io" version = "23.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "bytes", "ed25519", @@ -11059,7 +11061,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "24.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "lazy_static", "sp-core", @@ -11070,7 +11072,7 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.27.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", @@ -11082,7 +11084,7 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = 
"git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "thiserror", "zstd 0.12.3+zstd.1.5.2", @@ -11091,7 +11093,7 @@ dependencies = [ [[package]] name = "sp-metadata-ir" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-metadata", "parity-scale-codec", @@ -11102,7 +11104,7 @@ dependencies = [ [[package]] name = "sp-mmr-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "ckb-merkle-mountain-range", "log", @@ -11120,7 +11122,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "parity-scale-codec", "scale-info", @@ -11134,7 +11136,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "sp-api", "sp-core", @@ -11144,7 +11146,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "backtrace", 
"lazy_static", @@ -11154,7 +11156,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "rustc-hash", "serde", @@ -11164,7 +11166,7 @@ dependencies = [ [[package]] name = "sp-runtime" version = "24.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "either", "hash256-std-hasher", @@ -11186,7 +11188,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "17.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -11204,7 +11206,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "11.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "Inflector", "proc-macro-crate", @@ -11216,7 +11218,7 @@ dependencies = [ [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "parity-scale-codec", "scale-info", @@ -11231,7 +11233,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -11245,7 +11247,7 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.28.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "hash-db", "log", @@ -11266,7 +11268,7 @@ dependencies = [ [[package]] name = "sp-statement-store" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "aes-gcm 0.10.2", "curve25519-dalek 3.2.0", @@ -11290,12 +11292,12 @@ dependencies = [ [[package]] name = "sp-std" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" [[package]] name = "sp-storage" version = "13.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "impl-serde", "parity-scale-codec", @@ -11308,7 +11310,7 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ 
"async-trait", "parity-scale-codec", @@ -11321,7 +11323,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "10.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "parity-scale-codec", "sp-std", @@ -11333,7 +11335,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "sp-api", "sp-runtime", @@ -11342,7 +11344,7 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "async-trait", "parity-scale-codec", @@ -11357,7 +11359,7 @@ dependencies = [ [[package]] name = "sp-trie" version = "22.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "ahash 0.8.2", "hash-db", @@ -11380,7 +11382,7 @@ dependencies = [ [[package]] name = "sp-version" version = "22.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "impl-serde", "parity-scale-codec", @@ -11397,7 +11399,7 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "8.0.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "parity-scale-codec", "proc-macro2", @@ -11408,7 +11410,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "14.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -11421,7 +11423,7 @@ dependencies = [ [[package]] name = "sp-weights" version = "20.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "parity-scale-codec", "scale-info", @@ -11646,12 +11648,12 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" [[package]] name = "substrate-frame-rpc-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "frame-system-rpc-runtime-api", "futures", @@ -11670,7 +11672,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = 
"git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "hyper", "log", @@ -11682,7 +11684,7 @@ dependencies = [ [[package]] name = "substrate-rpc-client" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "async-trait", "jsonrpsee", @@ -11695,7 +11697,7 @@ dependencies = [ [[package]] name = "substrate-state-trie-migration-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -11712,7 +11714,7 @@ dependencies = [ [[package]] name = "substrate-test-client" version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "array-bytes", "async-trait", @@ -11738,7 +11740,7 @@ dependencies = [ [[package]] name = "substrate-test-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "futures", "substrate-test-utils-derive", @@ -11748,7 +11750,7 @@ dependencies = [ [[package]] name = "substrate-test-utils-derive" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" 
dependencies = [ "proc-macro-crate", "proc-macro2", @@ -11759,7 +11761,7 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "ansi_term", "build-helper", @@ -12636,7 +12638,7 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "try-runtime-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#96f8c97dff7f419a349ee01d02613445ba4a41f8" +source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" dependencies = [ "async-trait", "clap 4.2.5", diff --git a/node/network/bridge/src/network.rs b/node/network/bridge/src/network.rs index f309224d175e..04026197f6eb 100644 --- a/node/network/bridge/src/network.rs +++ b/node/network/bridge/src/network.rs @@ -94,7 +94,11 @@ pub trait Network: Clone + Send + 'static { ) -> Result<(), String>; /// Removes the peers for the protocol's peer set (both reserved and non-reserved). - async fn remove_from_peers_set(&mut self, protocol: ProtocolName, peers: Vec); + async fn remove_from_peers_set( + &mut self, + protocol: ProtocolName, + peers: Vec, + ) -> Result<(), String>; /// Send a request to a remote peer. 
async fn start_request( @@ -129,8 +133,12 @@ impl Network for Arc> { NetworkService::set_reserved_peers(&**self, protocol, multiaddresses) } - async fn remove_from_peers_set(&mut self, protocol: ProtocolName, peers: Vec) { - NetworkService::remove_peers_from_reserved_set(&**self, protocol, peers); + async fn remove_from_peers_set( + &mut self, + protocol: ProtocolName, + peers: Vec, + ) -> Result<(), String> { + NetworkService::remove_peers_from_reserved_set(&**self, protocol, peers) } fn report_peer(&self, who: PeerId, rep: ReputationChange) { diff --git a/node/network/bridge/src/rx/tests.rs b/node/network/bridge/src/rx/tests.rs index d98b1d4f0856..078f6591ae2a 100644 --- a/node/network/bridge/src/rx/tests.rs +++ b/node/network/bridge/src/rx/tests.rs @@ -117,7 +117,13 @@ impl Network for TestNetwork { Ok(()) } - async fn remove_from_peers_set(&mut self, _protocol: ProtocolName, _: Vec) {} + async fn remove_from_peers_set( + &mut self, + _protocol: ProtocolName, + _: Vec, + ) -> Result<(), String> { + Ok(()) + } async fn start_request( &self, diff --git a/node/network/bridge/src/tx/tests.rs b/node/network/bridge/src/tx/tests.rs index f6ff16237b7d..520218d3c481 100644 --- a/node/network/bridge/src/tx/tests.rs +++ b/node/network/bridge/src/tx/tests.rs @@ -105,7 +105,13 @@ impl Network for TestNetwork { Ok(()) } - async fn remove_from_peers_set(&mut self, _protocol: ProtocolName, _: Vec) {} + async fn remove_from_peers_set( + &mut self, + _protocol: ProtocolName, + _: Vec, + ) -> Result<(), String> { + Ok(()) + } async fn start_request( &self, diff --git a/node/network/bridge/src/validator_discovery.rs b/node/network/bridge/src/validator_discovery.rs index 3b4bed3987d7..098416c5b88d 100644 --- a/node/network/bridge/src/validator_discovery.rs +++ b/node/network/bridge/src/validator_discovery.rs @@ -236,8 +236,13 @@ mod tests { Ok(()) } - async fn remove_from_peers_set(&mut self, _protocol: ProtocolName, peers: Vec) { + async fn remove_from_peers_set( + &mut self, + 
_protocol: ProtocolName, + peers: Vec, + ) -> Result<(), String> { self.peers_set.retain(|elem| !peers.contains(elem)); + Ok(()) } async fn start_request( From 88c1a7004eb220e48a35dfbd7ee6e80d25153f10 Mon Sep 17 00:00:00 2001 From: drskalman <35698397+drskalman@users.noreply.github.com> Date: Wed, 2 Aug 2023 17:25:12 +0200 Subject: [PATCH 12/35] Companion for Substrate#14373 (#7572) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * rename BEEFY `crypto` →`ecdsa_crypto` * - bump up `BeefyApi` to version 3 - deal with `PeerId` error. * update BEEFY dependency names for `fake-runtime` and `chain_spec` revert Cargo.toml * cargo fmt * Use master Cargo.lock * update lockfile for {"substrate"} --------- Co-authored-by: Davide Galassi Co-authored-by: parity-processbot <> --- Cargo.lock | 368 +++++++++++++-------------- node/service/src/chain_spec.rs | 2 +- node/service/src/fake_runtime_api.rs | 4 +- runtime/kusama/src/lib.rs | 4 +- runtime/kusama/src/xcm_config.rs | 13 +- runtime/polkadot/src/lib.rs | 4 +- runtime/rococo/src/lib.rs | 6 +- runtime/test-runtime/src/lib.rs | 4 +- runtime/westend/src/lib.rs | 4 +- 9 files changed, 206 insertions(+), 203 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2087d3f597d4..297c85c67093 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -438,7 +438,7 @@ dependencies = [ [[package]] name = "binary-merkle-tree" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "hash-db", "log", @@ -2223,7 +2223,7 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "parity-scale-codec", ] @@ -2246,7 +2246,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frame-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-support", "frame-support-procedural", @@ -2271,7 +2271,7 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "Inflector", "array-bytes", @@ -2319,7 +2319,7 @@ dependencies = [ [[package]] name = "frame-election-provider-solution-type" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2330,7 +2330,7 @@ dependencies = [ [[package]] name = "frame-election-provider-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-election-provider-solution-type", "frame-support", @@ -2347,7 +2347,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-support", "frame-system", @@ -2376,7 +2376,7 @@ dependencies = [ [[package]] name = "frame-remote-externalities" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "async-recursion", "futures", @@ -2397,7 +2397,7 @@ dependencies = [ [[package]] name = "frame-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "aquamarine", "bitflags", @@ -2434,7 +2434,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "Inflector", "cfg-expr", @@ -2452,7 +2452,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", @@ -2464,7 +2464,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "proc-macro2", "quote", @@ -2474,7 +2474,7 @@ dependencies = [ [[package]] name = "frame-support-test" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-executive", @@ -2501,7 +2501,7 @@ dependencies = [ [[package]] name = "frame-support-test-pallet" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-support", "frame-system", @@ -2514,7 +2514,7 @@ dependencies = [ [[package]] name = "frame-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "cfg-if", "frame-support", @@ -2533,7 +2533,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -2548,7 +2548,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ 
"parity-scale-codec", "sp-api", @@ -2557,7 +2557,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-support", "parity-scale-codec", @@ -2739,7 +2739,7 @@ dependencies = [ [[package]] name = "generate-bags" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "chrono", "frame-election-provider-support", @@ -4594,7 +4594,7 @@ dependencies = [ [[package]] name = "mmr-gadget" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "futures", "log", @@ -4613,7 +4613,7 @@ dependencies = [ [[package]] name = "mmr-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "anyhow", "jsonrpsee", @@ -5139,7 +5139,7 @@ dependencies = [ [[package]] name = "pallet-assets" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -5154,7 +5154,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-support", "frame-system", @@ -5170,7 +5170,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-support", "frame-system", @@ -5184,7 +5184,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -5208,7 +5208,7 @@ dependencies = [ [[package]] name = "pallet-bags-list" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5228,7 +5228,7 @@ dependencies = [ [[package]] name = "pallet-bags-list-remote-tests" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-election-provider-support", "frame-remote-externalities", @@ -5247,7 +5247,7 @@ dependencies = [ [[package]] name = "pallet-balances" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -5262,7 +5262,7 @@ dependencies = [ [[package]] name = "pallet-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-support", "frame-system", @@ -5281,7 +5281,7 @@ dependencies = [ [[package]] name = "pallet-beefy-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "array-bytes", "binary-merkle-tree", @@ -5305,7 +5305,7 @@ dependencies = [ [[package]] name = "pallet-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -5323,7 +5323,7 @@ dependencies = [ [[package]] name = "pallet-child-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -5342,7 +5342,7 @@ dependencies = [ [[package]] name = "pallet-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -5359,7 +5359,7 @@ dependencies = [ [[package]] name = "pallet-conviction-voting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "assert_matches", "frame-benchmarking", @@ -5376,7 +5376,7 @@ dependencies = [ [[package]] name = "pallet-democracy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -5394,7 +5394,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-multi-phase" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5417,7 +5417,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-support-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5430,7 +5430,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -5449,7 +5449,7 @@ dependencies = [ [[package]] name = "pallet-fast-unstake" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "docify", "frame-benchmarking", @@ -5468,7 +5468,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -5491,7 +5491,7 @@ dependencies = [ [[package]] name = "pallet-identity" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "enumflags2", "frame-benchmarking", @@ -5507,7 +5507,7 @@ dependencies = [ [[package]] name = "pallet-im-online" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -5527,7 +5527,7 @@ dependencies = [ [[package]] name = "pallet-indices" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ 
"frame-benchmarking", "frame-support", @@ -5544,7 +5544,7 @@ dependencies = [ [[package]] name = "pallet-membership" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -5561,7 +5561,7 @@ dependencies = [ [[package]] name = "pallet-message-queue" version = "7.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -5580,7 +5580,7 @@ dependencies = [ [[package]] name = "pallet-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -5597,7 +5597,7 @@ dependencies = [ [[package]] name = "pallet-multisig" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -5613,7 +5613,7 @@ dependencies = [ [[package]] name = "pallet-nis" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -5629,7 +5629,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools" 
version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-support", "frame-system", @@ -5648,7 +5648,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-benchmarking" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5668,7 +5668,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-runtime-api" version = "1.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "pallet-nomination-pools", "parity-scale-codec", @@ -5679,7 +5679,7 @@ dependencies = [ [[package]] name = "pallet-offences" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-support", "frame-system", @@ -5696,7 +5696,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5720,7 +5720,7 @@ dependencies = [ [[package]] name = "pallet-preimage" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -5737,7 +5737,7 @@ dependencies = [ [[package]] name = "pallet-proxy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -5752,7 +5752,7 @@ dependencies = [ [[package]] name = "pallet-ranked-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -5770,7 +5770,7 @@ dependencies = [ [[package]] name = "pallet-recovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -5785,7 +5785,7 @@ dependencies = [ [[package]] name = "pallet-referenda" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "assert_matches", "frame-benchmarking", @@ -5804,7 +5804,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -5821,7 +5821,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-support", "frame-system", @@ -5842,7 +5842,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -5858,7 +5858,7 @@ dependencies = [ [[package]] name = "pallet-society" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -5877,7 +5877,7 @@ dependencies = [ [[package]] name = "pallet-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5900,7 +5900,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -5911,7 +5911,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-fn" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "log", "sp-arithmetic", @@ -5920,7 +5920,7 @@ dependencies = [ [[package]] name = "pallet-staking-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "parity-scale-codec", "sp-api", @@ -5929,7 +5929,7 @@ dependencies = [ [[package]] name = "pallet-state-trie-migration" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -5946,7 +5946,7 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -5961,7 +5961,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = 
[ "frame-benchmarking", "frame-support", @@ -5979,7 +5979,7 @@ dependencies = [ [[package]] name = "pallet-tips" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -5998,7 +5998,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-support", "frame-system", @@ -6014,7 +6014,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", @@ -6030,7 +6030,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -6042,7 +6042,7 @@ dependencies = [ [[package]] name = "pallet-treasury" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ 
-6059,7 +6059,7 @@ dependencies = [ [[package]] name = "pallet-uniques" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -6074,7 +6074,7 @@ dependencies = [ [[package]] name = "pallet-utility" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -6090,7 +6090,7 @@ dependencies = [ [[package]] name = "pallet-vesting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -6105,7 +6105,7 @@ dependencies = [ [[package]] name = "pallet-whitelist" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-benchmarking", "frame-support", @@ -9115,7 +9115,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "log", "sp-core", @@ -9126,7 +9126,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "async-trait", "futures", @@ -9154,7 +9154,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "futures", "futures-timer", @@ -9177,7 +9177,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -9192,7 +9192,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "memmap2", "sc-chain-spec-derive", @@ -9211,7 +9211,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -9222,7 +9222,7 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "array-bytes", "chrono", @@ -9261,7 +9261,7 @@ dependencies = [ [[package]] name = "sc-client-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "fnv", "futures", @@ -9287,7 +9287,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "hash-db", "kvdb", @@ -9313,7 +9313,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "async-trait", "futures", @@ -9338,7 +9338,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "async-trait", "fork-tree", @@ -9374,7 +9374,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "futures", "jsonrpsee", @@ -9396,7 +9396,7 @@ dependencies = [ [[package]] 
name = "sc-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "array-bytes", "async-channel", @@ -9430,7 +9430,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "futures", "jsonrpsee", @@ -9449,7 +9449,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "fork-tree", "parity-scale-codec", @@ -9462,7 +9462,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "ahash 0.8.2", "array-bytes", @@ -9503,7 +9503,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "finality-grandpa", "futures", @@ -9523,7 +9523,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "async-trait", "futures", @@ -9546,7 +9546,7 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", @@ -9568,7 +9568,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "sc-allocator", "sp-maybe-compressed-blob", @@ -9580,7 +9580,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "anyhow", "cfg-if", @@ -9597,7 +9597,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "ansi_term", "futures", @@ -9613,7 +9613,7 @@ dependencies = [ [[package]] name = "sc-keystore" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "array-bytes", "parking_lot 0.12.1", @@ -9627,7 +9627,7 @@ dependencies = [ [[package]] name = "sc-network" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "array-bytes", "async-channel", @@ -9670,7 +9670,7 @@ dependencies = [ [[package]] name = "sc-network-bitswap" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "async-channel", "cid", @@ -9690,7 +9690,7 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "async-trait", "bitflags", @@ -9707,7 +9707,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "ahash 0.8.2", "futures", @@ -9726,7 +9726,7 @@ dependencies = [ [[package]] name = "sc-network-light" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "array-bytes", "async-channel", @@ -9747,7 
+9747,7 @@ dependencies = [ [[package]] name = "sc-network-sync" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "array-bytes", "async-channel", @@ -9781,7 +9781,7 @@ dependencies = [ [[package]] name = "sc-network-transactions" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "array-bytes", "futures", @@ -9799,7 +9799,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "array-bytes", "bytes", @@ -9833,7 +9833,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -9842,7 +9842,7 @@ dependencies = [ [[package]] name = "sc-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "futures", "jsonrpsee", @@ -9873,7 +9873,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -9892,7 +9892,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "http", "jsonrpsee", @@ -9907,7 +9907,7 @@ dependencies = [ [[package]] name = "sc-rpc-spec-v2" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "array-bytes", "futures", @@ -9933,7 +9933,7 @@ dependencies = [ [[package]] name = "sc-service" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "async-trait", "directories", @@ -9997,7 +9997,7 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "log", "parity-scale-codec", @@ -10008,7 +10008,7 @@ dependencies = [ [[package]] name = "sc-storage-monitor" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "clap 4.2.5", "fs4", @@ -10022,7 +10022,7 @@ dependencies = [ [[package]] name = "sc-sync-state-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -10041,7 +10041,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "6.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "futures", "libc", @@ -10060,7 +10060,7 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "chrono", "futures", @@ -10079,7 +10079,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "ansi_term", "atty", @@ -10108,7 +10108,7 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -10119,7 +10119,7 @@ dependencies = 
[ [[package]] name = "sc-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "async-trait", "futures", @@ -10145,7 +10145,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "async-trait", "futures", @@ -10161,7 +10161,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "async-channel", "futures", @@ -10709,7 +10709,7 @@ dependencies = [ [[package]] name = "sp-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "hash-db", "log", @@ -10730,7 +10730,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "Inflector", "blake2", @@ -10744,7 +10744,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" version = "23.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "parity-scale-codec", "scale-info", @@ -10757,7 +10757,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "16.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "integer-sqrt", "num-traits", @@ -10771,7 +10771,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "parity-scale-codec", "scale-info", @@ -10784,7 +10784,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "sp-api", "sp-inherents", @@ -10795,7 +10795,7 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "futures", "log", @@ -10813,7 +10813,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "async-trait", "futures", @@ -10828,7 
+10828,7 @@ dependencies = [ [[package]] name = "sp-consensus-aura" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "async-trait", "parity-scale-codec", @@ -10845,7 +10845,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "async-trait", "parity-scale-codec", @@ -10864,7 +10864,7 @@ dependencies = [ [[package]] name = "sp-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "lazy_static", "parity-scale-codec", @@ -10883,7 +10883,7 @@ dependencies = [ [[package]] name = "sp-consensus-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "finality-grandpa", "log", @@ -10901,7 +10901,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "parity-scale-codec", "scale-info", @@ -10913,7 +10913,7 @@ dependencies = [ [[package]] name = "sp-core" version = "21.0.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "array-bytes", "bitflags", @@ -10958,7 +10958,7 @@ dependencies = [ [[package]] name = "sp-core-hashing" version = "9.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "blake2b_simd", "byteorder", @@ -10971,7 +10971,7 @@ dependencies = [ [[package]] name = "sp-core-hashing-proc-macro" version = "9.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "quote", "sp-core-hashing", @@ -10981,7 +10981,7 @@ dependencies = [ [[package]] name = "sp-database" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "kvdb", "parking_lot 0.12.1", @@ -10990,7 +10990,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "proc-macro2", "quote", @@ -11000,7 +11000,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.19.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "environmental", "parity-scale-codec", @@ -11011,7 +11011,7 @@ dependencies = [ [[package]] name = "sp-genesis-builder" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "serde_json", "sp-api", @@ -11022,7 +11022,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -11036,7 +11036,7 @@ dependencies = [ [[package]] name = "sp-io" version = "23.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "bytes", "ed25519", @@ -11061,7 +11061,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "24.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "lazy_static", "sp-core", @@ -11072,7 +11072,7 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.27.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", @@ -11084,7 +11084,7 @@ 
dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "thiserror", "zstd 0.12.3+zstd.1.5.2", @@ -11093,7 +11093,7 @@ dependencies = [ [[package]] name = "sp-metadata-ir" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-metadata", "parity-scale-codec", @@ -11104,7 +11104,7 @@ dependencies = [ [[package]] name = "sp-mmr-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "ckb-merkle-mountain-range", "log", @@ -11122,7 +11122,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "parity-scale-codec", "scale-info", @@ -11136,7 +11136,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "sp-api", "sp-core", @@ -11146,7 +11146,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "8.0.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "backtrace", "lazy_static", @@ -11156,7 +11156,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "rustc-hash", "serde", @@ -11166,7 +11166,7 @@ dependencies = [ [[package]] name = "sp-runtime" version = "24.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "either", "hash256-std-hasher", @@ -11188,7 +11188,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "17.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -11206,7 +11206,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "11.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "Inflector", "proc-macro-crate", @@ -11218,7 +11218,7 @@ dependencies = [ [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "parity-scale-codec", "scale-info", @@ -11233,7 +11233,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -11247,7 +11247,7 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.28.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "hash-db", "log", @@ -11268,7 +11268,7 @@ dependencies = [ [[package]] name = "sp-statement-store" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "aes-gcm 0.10.2", "curve25519-dalek 3.2.0", @@ -11292,12 +11292,12 @@ dependencies = [ [[package]] name = "sp-std" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" [[package]] name = "sp-storage" version = "13.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "impl-serde", "parity-scale-codec", @@ -11310,7 +11310,7 @@ dependencies = [ [[package]] name = "sp-timestamp" version = 
"4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "async-trait", "parity-scale-codec", @@ -11323,7 +11323,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "10.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "parity-scale-codec", "sp-std", @@ -11335,7 +11335,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "sp-api", "sp-runtime", @@ -11344,7 +11344,7 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "async-trait", "parity-scale-codec", @@ -11359,7 +11359,7 @@ dependencies = [ [[package]] name = "sp-trie" version = "22.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "ahash 0.8.2", "hash-db", @@ -11382,7 +11382,7 @@ dependencies = [ [[package]] name = "sp-version" version = "22.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "impl-serde", "parity-scale-codec", @@ -11399,7 +11399,7 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "parity-scale-codec", "proc-macro2", @@ -11410,7 +11410,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "14.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -11423,7 +11423,7 @@ dependencies = [ [[package]] name = "sp-weights" version = "20.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "parity-scale-codec", "scale-info", @@ -11648,12 +11648,12 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" [[package]] name = "substrate-frame-rpc-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "frame-system-rpc-runtime-api", "futures", @@ -11672,7 +11672,7 @@ dependencies = [ 
[[package]] name = "substrate-prometheus-endpoint" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "hyper", "log", @@ -11684,7 +11684,7 @@ dependencies = [ [[package]] name = "substrate-rpc-client" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "async-trait", "jsonrpsee", @@ -11697,7 +11697,7 @@ dependencies = [ [[package]] name = "substrate-state-trie-migration-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -11714,7 +11714,7 @@ dependencies = [ [[package]] name = "substrate-test-client" version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "array-bytes", "async-trait", @@ -11740,7 +11740,7 @@ dependencies = [ [[package]] name = "substrate-test-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "futures", "substrate-test-utils-derive", @@ -11750,7 +11750,7 @@ dependencies = [ [[package]] name = "substrate-test-utils-derive" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -11761,7 +11761,7 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "ansi_term", "build-helper", @@ -12638,7 +12638,7 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "try-runtime-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#109e1d8ad24de24f90c0c629490b8a7f648e741d" +source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" dependencies = [ "async-trait", "clap 4.2.5", diff --git a/node/service/src/chain_spec.rs b/node/service/src/chain_spec.rs index 3928b8618659..a9e6b45f3b2d 100644 --- a/node/service/src/chain_spec.rs +++ b/node/service/src/chain_spec.rs @@ -16,7 +16,7 @@ //! Polkadot chain configurations. -use beefy_primitives::crypto::AuthorityId as BeefyId; +use beefy_primitives::ecdsa_crypto::AuthorityId as BeefyId; use grandpa::AuthorityId as GrandpaId; #[cfg(feature = "kusama-native")] use kusama_runtime as kusama; diff --git a/node/service/src/fake_runtime_api.rs b/node/service/src/fake_runtime_api.rs index f9d7799d8262..b322114cbb75 100644 --- a/node/service/src/fake_runtime_api.rs +++ b/node/service/src/fake_runtime_api.rs @@ -18,7 +18,7 @@ //! //! These are used to provide a type that implements these runtime APIs without requiring to import the native runtimes. 
-use beefy_primitives::crypto::{AuthorityId as BeefyId, Signature as BeefySignature}; +use beefy_primitives::ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}; use grandpa_primitives::AuthorityId as GrandpaId; use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; use polkadot_primitives::{ @@ -230,7 +230,7 @@ sp_api::impl_runtime_apis! { } } - impl beefy_primitives::BeefyApi for Runtime { + impl beefy_primitives::BeefyApi for Runtime { fn beefy_genesis() -> Option { unimplemented!() } diff --git a/runtime/kusama/src/lib.rs b/runtime/kusama/src/lib.rs index 6779caad84be..335ef79fab58 100644 --- a/runtime/kusama/src/lib.rs +++ b/runtime/kusama/src/lib.rs @@ -51,7 +51,7 @@ use runtime_parachains::{ }; use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; -use beefy_primitives::crypto::{AuthorityId as BeefyId, Signature as BeefySignature}; +use beefy_primitives::ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}; use frame_election_provider_support::{ generate_solution_type, onchain, NposSolution, SequentialPhragmen, }; @@ -1816,7 +1816,7 @@ sp_api::impl_runtime_apis! { } } - impl beefy_primitives::BeefyApi for Runtime { + impl beefy_primitives::BeefyApi for Runtime { fn beefy_genesis() -> Option { // dummy implementation due to lack of BEEFY pallet. 
None diff --git a/runtime/kusama/src/xcm_config.rs b/runtime/kusama/src/xcm_config.rs index 3b5335a33ff8..59e32f2ca544 100644 --- a/runtime/kusama/src/xcm_config.rs +++ b/runtime/kusama/src/xcm_config.rs @@ -432,8 +432,10 @@ fn karura_liquid_staking_xcm_has_sane_weight_upper_limt() { // should be [WithdrawAsset, BuyExecution, Transact, RefundSurplus, DepositAsset] let blob = hex_literal::hex!("02140004000000000700e40b540213000000000700e40b54020006010700c817a804341801000006010b00c490bf4302140d010003ffffffff000100411f"); - let Ok(VersionedXcm::V2(old_xcm)) = - VersionedXcm::::decode(&mut &blob[..]) else { panic!("can't decode XCM blob") }; + let Ok(VersionedXcm::V2(old_xcm)) = VersionedXcm::::decode(&mut &blob[..]) + else { + panic!("can't decode XCM blob") + }; let mut xcm: Xcm = old_xcm.try_into().expect("conversion from v2 to v3 failed"); let weight = ::Weigher::weight(&mut xcm) @@ -444,9 +446,10 @@ fn karura_liquid_staking_xcm_has_sane_weight_upper_limt() { assert!(weight.all_lte(Weight::from_parts(30_313_281_000, 72_722))); let Some(Transact { require_weight_at_most, call, .. }) = - xcm.inner_mut().into_iter().find(|inst| matches!(inst, Transact { .. })) else { - panic!("no Transact instruction found") - }; + xcm.inner_mut().into_iter().find(|inst| matches!(inst, Transact { .. 
})) + else { + panic!("no Transact instruction found") + }; // should be pallet_utility.as_derivative { index: 0, call: pallet_staking::bond_extra { max_additional: 2490000000000 } } let message_call = call.take_decoded().expect("can't decode Transact call"); let call_weight = message_call.get_dispatch_info().weight; diff --git a/runtime/polkadot/src/lib.rs b/runtime/polkadot/src/lib.rs index 55ddb4707791..585e48dd5a4b 100644 --- a/runtime/polkadot/src/lib.rs +++ b/runtime/polkadot/src/lib.rs @@ -39,7 +39,7 @@ use runtime_parachains::{ }; use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; -use beefy_primitives::crypto::{AuthorityId as BeefyId, Signature as BeefySignature}; +use beefy_primitives::ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}; use frame_election_provider_support::{generate_solution_type, onchain, SequentialPhragmen}; use frame_support::{ construct_runtime, parameter_types, @@ -1819,7 +1819,7 @@ sp_api::impl_runtime_apis! { } } - impl beefy_primitives::BeefyApi for Runtime { + impl beefy_primitives::BeefyApi for Runtime { fn beefy_genesis() -> Option { // dummy implementation due to lack of BEEFY pallet. None diff --git a/runtime/rococo/src/lib.rs b/runtime/rococo/src/lib.rs index 9800fe0ff754..31b657c3a5fb 100644 --- a/runtime/rococo/src/lib.rs +++ b/runtime/rococo/src/lib.rs @@ -51,7 +51,7 @@ use runtime_parachains::{ use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; use beefy_primitives::{ - crypto::{AuthorityId as BeefyId, Signature as BeefySignature}, + ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}, mmr::{BeefyDataProvider, MmrLeafVersion}, }; @@ -1819,8 +1819,8 @@ sp_api::impl_runtime_apis! 
{ } } - #[api_version(2)] - impl beefy_primitives::BeefyApi for Runtime { + #[api_version(3)] + impl beefy_primitives::BeefyApi for Runtime { fn beefy_genesis() -> Option { Beefy::genesis_block() } diff --git a/runtime/test-runtime/src/lib.rs b/runtime/test-runtime/src/lib.rs index 035adcc007c2..9e2f2a66455b 100644 --- a/runtime/test-runtime/src/lib.rs +++ b/runtime/test-runtime/src/lib.rs @@ -35,7 +35,7 @@ use polkadot_runtime_parachains::{ }; use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; -use beefy_primitives::crypto::{AuthorityId as BeefyId, Signature as BeefySignature}; +use beefy_primitives::ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}; use frame_election_provider_support::{onchain, SequentialPhragmen}; use frame_support::{ construct_runtime, parameter_types, @@ -939,7 +939,7 @@ sp_api::impl_runtime_apis! { } } - impl beefy_primitives::BeefyApi for Runtime { + impl beefy_primitives::BeefyApi for Runtime { fn beefy_genesis() -> Option { // dummy implementation due to lack of BEEFY pallet. None diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs index 098e65d43de4..dd4bcff32e39 100644 --- a/runtime/westend/src/lib.rs +++ b/runtime/westend/src/lib.rs @@ -21,7 +21,7 @@ #![recursion_limit = "256"] use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; -use beefy_primitives::crypto::{AuthorityId as BeefyId, Signature as BeefySignature}; +use beefy_primitives::ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}; use frame_election_provider_support::{onchain, SequentialPhragmen}; use frame_support::{ construct_runtime, parameter_types, @@ -1554,7 +1554,7 @@ sp_api::impl_runtime_apis! { } } - impl beefy_primitives::BeefyApi for Runtime { + impl beefy_primitives::BeefyApi for Runtime { fn beefy_genesis() -> Option { // dummy implementation due to lack of BEEFY pallet. 
None From b137472a422477994fa4be88d3751b312d0b09fd Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Thu, 3 Aug 2023 10:32:49 +0200 Subject: [PATCH 13/35] [xcm] `GlobalConsensusConvertsFor` for remote relay chain (based on pevious GlobalConsensusParachainConvertsFor) (#7517) * [xcm] `GlobalConsensusConvertsFor` for remote relay chain (based on previous GlobalConsensusParachainConvertsFor) * Typo * PR fix (constants in test) * Re-export of `GlobalConsensusConvertsFor` * assert to panic * Update xcm/src/v3/multiasset.rs Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> * Update xcm/xcm-builder/src/location_conversion.rs Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> * Update xcm/xcm-builder/src/location_conversion.rs Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> * Review fixes --------- Co-authored-by: parity-processbot <> Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- xcm/src/v3/multiasset.rs | 2 +- xcm/xcm-builder/src/lib.rs | 5 +- xcm/xcm-builder/src/location_conversion.rs | 135 +++++++++++++++++++++ 3 files changed, 139 insertions(+), 3 deletions(-) diff --git a/xcm/src/v3/multiasset.rs b/xcm/src/v3/multiasset.rs index a7a658f06017..a4900a71539a 100644 --- a/xcm/src/v3/multiasset.rs +++ b/xcm/src/v3/multiasset.rs @@ -406,7 +406,7 @@ pub struct MultiAsset { /// The overall asset identity (aka *class*, in the case of a non-fungible). pub id: AssetId, /// The fungibility of the asset, which contains either the amount (in the case of a fungible - /// asset) or the *instance ID`, the secondary asset identifier. + /// asset) or the *instance ID*, the secondary asset identifier. 
pub fun: Fungibility, } diff --git a/xcm/xcm-builder/src/lib.rs b/xcm/xcm-builder/src/lib.rs index 984ace84dc69..e3f910409638 100644 --- a/xcm/xcm-builder/src/lib.rs +++ b/xcm/xcm-builder/src/lib.rs @@ -33,8 +33,9 @@ pub use location_conversion::{ Account32Hash, AccountId32Aliases, AccountKey20Aliases, AliasesIntoAccountId32, ChildParachainConvertsVia, DescribeAccountId32Terminal, DescribeAccountIdTerminal, DescribeAccountKey20Terminal, DescribeAllTerminal, DescribeFamily, DescribeLocation, - DescribePalletTerminal, DescribeTerminus, GlobalConsensusParachainConvertsFor, - HashedDescription, ParentIsPreset, SiblingParachainConvertsVia, + DescribePalletTerminal, DescribeTerminus, GlobalConsensusConvertsFor, + GlobalConsensusParachainConvertsFor, HashedDescription, ParentIsPreset, + SiblingParachainConvertsVia, }; mod origin_conversion; diff --git a/xcm/xcm-builder/src/location_conversion.rs b/xcm/xcm-builder/src/location_conversion.rs index 3a95884328a1..ccc3cc040e61 100644 --- a/xcm/xcm-builder/src/location_conversion.rs +++ b/xcm/xcm-builder/src/location_conversion.rs @@ -345,6 +345,42 @@ impl>, AccountId: From<[u8; 20]> + Into<[u8; 20]> } } +/// Converts a location which is a top-level relay chain (which provides its own consensus) into a 32-byte `AccountId`. +/// +/// This will always result in the *same account ID* being returned for the same Relay-chain, regardless of the relative security of +/// this Relay-chain compared to the local chain. +/// +/// Note: No distinction is made between the cases when the given `UniversalLocation` lies within +/// the same consensus system (i.e. is itself or a parent) and when it is a foreign consensus +/// system. 
+pub struct GlobalConsensusConvertsFor( + PhantomData<(UniversalLocation, AccountId)>, +); +impl, AccountId: From<[u8; 32]> + Clone> + ConvertLocation for GlobalConsensusConvertsFor +{ + fn convert_location(location: &MultiLocation) -> Option { + let universal_source = UniversalLocation::get(); + log::trace!( + target: "xcm::location_conversion", + "GlobalConsensusConvertsFor universal_source: {:?}, location: {:?}", + universal_source, location, + ); + let (remote_network, remote_location) = + ensure_is_remote(universal_source, *location).ok()?; + + match remote_location { + Here => Some(AccountId::from(Self::from_params(&remote_network))), + _ => None, + } + } +} +impl GlobalConsensusConvertsFor { + fn from_params(network: &NetworkId) -> [u8; 32] { + (b"glblcnsnss_", network).using_encoded(blake2_256) + } +} + /// Converts a location which is a top-level parachain (i.e. a parachain held on a /// Relay-chain which provides its own consensus) into a 32-byte `AccountId`. /// @@ -473,6 +509,105 @@ mod tests { assert_eq!(inverted, Err(())); } + #[test] + fn global_consensus_converts_for_works() { + parameter_types! 
{ + pub UniversalLocationInNetwork1: InteriorMultiLocation = X2(GlobalConsensus(ByGenesis([1; 32])), Parachain(1234)); + pub UniversalLocationInNetwork2: InteriorMultiLocation = X2(GlobalConsensus(ByGenesis([2; 32])), Parachain(1234)); + } + let network_1 = UniversalLocationInNetwork1::get().global_consensus().expect("NetworkId"); + let network_2 = UniversalLocationInNetwork2::get().global_consensus().expect("NetworkId"); + let network_3 = ByGenesis([3; 32]); + let network_4 = ByGenesis([4; 32]); + let network_5 = ByGenesis([5; 32]); + + let test_data = vec![ + (MultiLocation::parent(), false), + (MultiLocation::new(0, Here), false), + (MultiLocation::new(0, X1(GlobalConsensus(network_1))), false), + (MultiLocation::new(1, X1(GlobalConsensus(network_1))), false), + (MultiLocation::new(2, X1(GlobalConsensus(network_1))), false), + (MultiLocation::new(0, X1(GlobalConsensus(network_2))), false), + (MultiLocation::new(1, X1(GlobalConsensus(network_2))), false), + (MultiLocation::new(2, X1(GlobalConsensus(network_2))), true), + (MultiLocation::new(0, X2(GlobalConsensus(network_2), Parachain(1000))), false), + (MultiLocation::new(1, X2(GlobalConsensus(network_2), Parachain(1000))), false), + (MultiLocation::new(2, X2(GlobalConsensus(network_2), Parachain(1000))), false), + ]; + + for (location, expected_result) in test_data { + let result = + GlobalConsensusConvertsFor::::convert_location( + &location, + ); + match result { + Some(account) => { + assert_eq!( + true, expected_result, + "expected_result: {}, but conversion passed: {:?}, location: {:?}", + expected_result, account, location + ); + match &location { + MultiLocation { interior: X1(GlobalConsensus(network)), .. 
} => + assert_eq!( + account, + GlobalConsensusConvertsFor::::from_params(network), + "expected_result: {}, but conversion passed: {:?}, location: {:?}", expected_result, account, location + ), + _ => panic!("expected_result: {}, conversion passed: {:?}, but MultiLocation does not match expected pattern, location: {:?}", expected_result, account, location) + } + }, + None => { + assert_eq!( + false, expected_result, + "expected_result: {} - but conversion failed, location: {:?}", + expected_result, location + ); + }, + } + } + + // all success + let res_1_gc_network_3 = + GlobalConsensusConvertsFor::::convert_location( + &MultiLocation::new(2, X1(GlobalConsensus(network_3))), + ) + .expect("conversion is ok"); + let res_2_gc_network_3 = + GlobalConsensusConvertsFor::::convert_location( + &MultiLocation::new(2, X1(GlobalConsensus(network_3))), + ) + .expect("conversion is ok"); + let res_1_gc_network_4 = + GlobalConsensusConvertsFor::::convert_location( + &MultiLocation::new(2, X1(GlobalConsensus(network_4))), + ) + .expect("conversion is ok"); + let res_2_gc_network_4 = + GlobalConsensusConvertsFor::::convert_location( + &MultiLocation::new(2, X1(GlobalConsensus(network_4))), + ) + .expect("conversion is ok"); + let res_1_gc_network_5 = + GlobalConsensusConvertsFor::::convert_location( + &MultiLocation::new(2, X1(GlobalConsensus(network_5))), + ) + .expect("conversion is ok"); + let res_2_gc_network_5 = + GlobalConsensusConvertsFor::::convert_location( + &MultiLocation::new(2, X1(GlobalConsensus(network_5))), + ) + .expect("conversion is ok"); + + assert_ne!(res_1_gc_network_3, res_1_gc_network_4); + assert_ne!(res_1_gc_network_4, res_1_gc_network_5); + assert_ne!(res_1_gc_network_3, res_1_gc_network_5); + + assert_eq!(res_1_gc_network_3, res_2_gc_network_3); + assert_eq!(res_1_gc_network_4, res_2_gc_network_4); + assert_eq!(res_1_gc_network_5, res_2_gc_network_5); + } + #[test] fn global_consensus_parachain_converts_for_works() { parameter_types! 
{ From b810ce4ef049f17ba9ac20a7c22a5ec8821314bb Mon Sep 17 00:00:00 2001 From: Andrei Eres Date: Fri, 4 Aug 2023 16:27:55 +0200 Subject: [PATCH 14/35] Fix flaky reputation change test (#7550) * Fix flaky reputation change test * Remove fixme Co-authored-by: Oliver Tale-Yazdi --------- Co-authored-by: Oliver Tale-Yazdi Co-authored-by: parity-processbot <> --- node/network/bitfield-distribution/src/tests.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/node/network/bitfield-distribution/src/tests.rs b/node/network/bitfield-distribution/src/tests.rs index eb25a0d34206..39816a55240b 100644 --- a/node/network/bitfield-distribution/src/tests.rs +++ b/node/network/bitfield-distribution/src/tests.rs @@ -428,8 +428,6 @@ fn receive_duplicate_messages() { } #[test] -// FIXME -#[cfg(feature = "enable-flaky")] fn delay_reputation_change() { use polkadot_node_subsystem_util::reputation::add_reputation; @@ -471,7 +469,7 @@ fn delay_reputation_change() { let pool = sp_core::testing::TaskExecutor::new(); let (ctx, mut handle) = make_subsystem_context::(pool); let mut rng = dummy_rng(); - let reputation_interval = Duration::from_millis(1); + let reputation_interval = Duration::from_millis(100); let bg = async move { let subsystem = BitfieldDistribution::new(Default::default()); From 314e519e2969052ed5566dd457f120451b125286 Mon Sep 17 00:00:00 2001 From: Lulu Date: Fri, 4 Aug 2023 17:33:05 +0100 Subject: [PATCH 15/35] Add license to crates (#7578) * Add license to crates This is required to publish to crates.io * Add more licenses --- Cargo.toml | 3 ++- cli/Cargo.toml | 1 + core-primitives/Cargo.toml | 1 + erasure-coding/Cargo.toml | 1 + erasure-coding/fuzzer/Cargo.toml | 1 + node/collation-generation/Cargo.toml | 1 + node/core/approval-voting/Cargo.toml | 1 + node/core/av-store/Cargo.toml | 1 + node/core/backing/Cargo.toml | 1 + node/core/bitfield-signing/Cargo.toml | 1 + node/core/candidate-validation/Cargo.toml | 1 + node/core/chain-api/Cargo.toml | 1 + 
node/core/chain-selection/Cargo.toml | 1 + node/core/dispute-coordinator/Cargo.toml | 1 + node/core/parachains-inherent/Cargo.toml | 1 + node/core/provisioner/Cargo.toml | 1 + node/core/pvf-checker/Cargo.toml | 1 + node/core/pvf/Cargo.toml | 1 + node/core/pvf/common/Cargo.toml | 1 + node/core/pvf/execute-worker/Cargo.toml | 1 + node/core/pvf/prepare-worker/Cargo.toml | 1 + node/core/runtime-api/Cargo.toml | 1 + node/gum/Cargo.toml | 1 + node/gum/proc-macro/Cargo.toml | 1 + node/jaeger/Cargo.toml | 1 + node/malus/Cargo.toml | 2 +- node/metrics/Cargo.toml | 1 + node/network/approval-distribution/Cargo.toml | 1 + node/network/availability-distribution/Cargo.toml | 1 + node/network/availability-recovery/Cargo.toml | 1 + node/network/bitfield-distribution/Cargo.toml | 1 + node/network/bridge/Cargo.toml | 1 + node/network/collator-protocol/Cargo.toml | 1 + node/network/dispute-distribution/Cargo.toml | 1 + node/network/gossip-support/Cargo.toml | 1 + node/network/protocol/Cargo.toml | 1 + node/network/statement-distribution/Cargo.toml | 1 + node/overseer/Cargo.toml | 1 + node/primitives/Cargo.toml | 1 + node/service/Cargo.toml | 1 + node/subsystem-test-helpers/Cargo.toml | 1 + node/subsystem-types/Cargo.toml | 1 + node/subsystem-util/Cargo.toml | 1 + node/subsystem/Cargo.toml | 1 + node/test/client/Cargo.toml | 1 + node/test/performance-test/Cargo.toml | 1 + node/test/service/Cargo.toml | 1 + node/zombienet-backchannel/Cargo.toml | 2 +- parachain/Cargo.toml | 1 + parachain/test-parachains/Cargo.toml | 1 + parachain/test-parachains/adder/Cargo.toml | 1 + parachain/test-parachains/adder/collator/Cargo.toml | 1 + parachain/test-parachains/halt/Cargo.toml | 1 + parachain/test-parachains/undying/Cargo.toml | 1 + parachain/test-parachains/undying/collator/Cargo.toml | 1 + primitives/Cargo.toml | 1 + primitives/test-helpers/Cargo.toml | 1 + rpc/Cargo.toml | 1 + runtime/common/Cargo.toml | 1 + runtime/common/slot_range_helper/Cargo.toml | 1 + runtime/kusama/Cargo.toml | 1 + 
runtime/kusama/constants/Cargo.toml | 1 + runtime/metrics/Cargo.toml | 1 + runtime/parachains/Cargo.toml | 1 + runtime/polkadot/Cargo.toml | 1 + runtime/polkadot/constants/Cargo.toml | 1 + runtime/rococo/Cargo.toml | 1 + runtime/rococo/constants/Cargo.toml | 1 + runtime/test-runtime/Cargo.toml | 1 + runtime/test-runtime/constants/Cargo.toml | 1 + runtime/westend/Cargo.toml | 1 + runtime/westend/constants/Cargo.toml | 1 + statement-table/Cargo.toml | 1 + utils/generate-bags/Cargo.toml | 1 + utils/remote-ext-tests/bags-list/Cargo.toml | 1 + utils/staking-miner/Cargo.toml | 1 + xcm/Cargo.toml | 1 + xcm/pallet-xcm-benchmarks/Cargo.toml | 1 + xcm/pallet-xcm/Cargo.toml | 1 + xcm/procedural/Cargo.toml | 1 + xcm/xcm-builder/Cargo.toml | 1 + xcm/xcm-executor/Cargo.toml | 1 + xcm/xcm-executor/integration-tests/Cargo.toml | 1 + xcm/xcm-simulator/Cargo.toml | 1 + xcm/xcm-simulator/example/Cargo.toml | 1 + xcm/xcm-simulator/fuzzer/Cargo.toml | 1 + 86 files changed, 87 insertions(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 6e97e9ba5878..05ec768a9771 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,17 +13,18 @@ path = "src/bin/prepare-worker.rs" [package] name = "polkadot" description = "Implementation of a `https://polkadot.network` node in Rust based on the Substrate framework." 
-license = "GPL-3.0-only" rust-version = "1.64.0" # workspace properties readme = "README.md" default-run = "polkadot" authors.workspace = true edition.workspace = true +license.workspace = true version.workspace = true [workspace.package] authors = ["Parity Technologies "] edition = "2021" +license = "GPL-3.0-only" repository = "https://github.com/paritytech/polkadot.git" version = "0.9.43" diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 7b782644125a..df1a22d8c18d 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -4,6 +4,7 @@ description = "Polkadot Relay-chain Client Node" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [package.metadata.wasm-pack.profile.release] # `wasm-opt` has some problems on Linux, see diff --git a/core-primitives/Cargo.toml b/core-primitives/Cargo.toml index 7412c2b6e7e0..0ed315e1307e 100644 --- a/core-primitives/Cargo.toml +++ b/core-primitives/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-core-primitives" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } diff --git a/erasure-coding/Cargo.toml b/erasure-coding/Cargo.toml index 44aa86f881c8..8dfb775c6f49 100644 --- a/erasure-coding/Cargo.toml +++ b/erasure-coding/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-erasure-coding" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] polkadot-primitives = { path = "../primitives" } diff --git a/erasure-coding/fuzzer/Cargo.toml b/erasure-coding/fuzzer/Cargo.toml index e67a00cb0c1a..9dee8e657840 100644 --- a/erasure-coding/fuzzer/Cargo.toml +++ b/erasure-coding/fuzzer/Cargo.toml @@ -3,6 +3,7 @@ name = "erasure_coding_fuzzer" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true publish = false [dependencies] diff --git 
a/node/collation-generation/Cargo.toml b/node/collation-generation/Cargo.toml index a7badc877d45..d48167af3902 100644 --- a/node/collation-generation/Cargo.toml +++ b/node/collation-generation/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-node-collation-generation" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] futures = "0.3.21" diff --git a/node/core/approval-voting/Cargo.toml b/node/core/approval-voting/Cargo.toml index a13065528fe2..a42e449f09f9 100644 --- a/node/core/approval-voting/Cargo.toml +++ b/node/core/approval-voting/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-node-core-approval-voting" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] futures = "0.3.21" diff --git a/node/core/av-store/Cargo.toml b/node/core/av-store/Cargo.toml index 72d8e111480f..4861386bfd7a 100644 --- a/node/core/av-store/Cargo.toml +++ b/node/core/av-store/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-node-core-av-store" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] futures = "0.3.21" diff --git a/node/core/backing/Cargo.toml b/node/core/backing/Cargo.toml index 7428372eec62..d611784d7bba 100644 --- a/node/core/backing/Cargo.toml +++ b/node/core/backing/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-node-core-backing" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] futures = "0.3.21" diff --git a/node/core/bitfield-signing/Cargo.toml b/node/core/bitfield-signing/Cargo.toml index b94cc6708c74..ee147fb5c22a 100644 --- a/node/core/bitfield-signing/Cargo.toml +++ b/node/core/bitfield-signing/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-node-core-bitfield-signing" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] futures = "0.3.21" diff --git 
a/node/core/candidate-validation/Cargo.toml b/node/core/candidate-validation/Cargo.toml index c0fca9a49996..ba40fea8140b 100644 --- a/node/core/candidate-validation/Cargo.toml +++ b/node/core/candidate-validation/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-node-core-candidate-validation" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] async-trait = "0.1.57" diff --git a/node/core/chain-api/Cargo.toml b/node/core/chain-api/Cargo.toml index 8400a2a5bf86..69d737ca2973 100644 --- a/node/core/chain-api/Cargo.toml +++ b/node/core/chain-api/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-node-core-chain-api" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] futures = "0.3.21" diff --git a/node/core/chain-selection/Cargo.toml b/node/core/chain-selection/Cargo.toml index b4104bc6f12d..071fec4415a4 100644 --- a/node/core/chain-selection/Cargo.toml +++ b/node/core/chain-selection/Cargo.toml @@ -4,6 +4,7 @@ description = "Chain Selection Subsystem" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] futures = "0.3.21" diff --git a/node/core/dispute-coordinator/Cargo.toml b/node/core/dispute-coordinator/Cargo.toml index 2ae1b73d6416..e4069f7f3330 100644 --- a/node/core/dispute-coordinator/Cargo.toml +++ b/node/core/dispute-coordinator/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-node-core-dispute-coordinator" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] futures = "0.3.21" diff --git a/node/core/parachains-inherent/Cargo.toml b/node/core/parachains-inherent/Cargo.toml index d4301cb22270..fdf785fbe66b 100644 --- a/node/core/parachains-inherent/Cargo.toml +++ b/node/core/parachains-inherent/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-node-core-parachains-inherent" version.workspace = true authors.workspace = true 
edition.workspace = true +license.workspace = true [dependencies] futures = "0.3.21" diff --git a/node/core/provisioner/Cargo.toml b/node/core/provisioner/Cargo.toml index 7c07118f1f3f..dac671e7ada5 100644 --- a/node/core/provisioner/Cargo.toml +++ b/node/core/provisioner/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-node-core-provisioner" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } diff --git a/node/core/pvf-checker/Cargo.toml b/node/core/pvf-checker/Cargo.toml index d41955d9bac5..ee7001524265 100644 --- a/node/core/pvf-checker/Cargo.toml +++ b/node/core/pvf-checker/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-node-core-pvf-checker" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] futures = "0.3.21" diff --git a/node/core/pvf/Cargo.toml b/node/core/pvf/Cargo.toml index 658a26cff09d..d6e9ef576628 100644 --- a/node/core/pvf/Cargo.toml +++ b/node/core/pvf/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-node-core-pvf" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [[bin]] name = "puppet_worker" diff --git a/node/core/pvf/common/Cargo.toml b/node/core/pvf/common/Cargo.toml index be119297cbc3..3e674422f812 100644 --- a/node/core/pvf/common/Cargo.toml +++ b/node/core/pvf/common/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-node-core-pvf-common" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] cpu-time = "1.0.0" diff --git a/node/core/pvf/execute-worker/Cargo.toml b/node/core/pvf/execute-worker/Cargo.toml index 167e8b4311a3..1c9e2d1f2784 100644 --- a/node/core/pvf/execute-worker/Cargo.toml +++ b/node/core/pvf/execute-worker/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-node-core-pvf-execute-worker" version.workspace = true authors.workspace = true 
edition.workspace = true +license.workspace = true [dependencies] cpu-time = "1.0.0" diff --git a/node/core/pvf/prepare-worker/Cargo.toml b/node/core/pvf/prepare-worker/Cargo.toml index 4e7f61297707..ec5b1e9c2399 100644 --- a/node/core/pvf/prepare-worker/Cargo.toml +++ b/node/core/pvf/prepare-worker/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-node-core-pvf-prepare-worker" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] futures = "0.3.21" diff --git a/node/core/runtime-api/Cargo.toml b/node/core/runtime-api/Cargo.toml index 56c36b74757b..22b4a96e60e8 100644 --- a/node/core/runtime-api/Cargo.toml +++ b/node/core/runtime-api/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-node-core-runtime-api" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] futures = "0.3.21" diff --git a/node/gum/Cargo.toml b/node/gum/Cargo.toml index 6bd4f0758894..9e9e78450292 100644 --- a/node/gum/Cargo.toml +++ b/node/gum/Cargo.toml @@ -3,6 +3,7 @@ name = "tracing-gum" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true description = "Stick logs together with the TraceID as provided by tempo" [dependencies] diff --git a/node/gum/proc-macro/Cargo.toml b/node/gum/proc-macro/Cargo.toml index a762d72c8fca..61f31beb61f3 100644 --- a/node/gum/proc-macro/Cargo.toml +++ b/node/gum/proc-macro/Cargo.toml @@ -3,6 +3,7 @@ name = "tracing-gum-proc-macro" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true description = "Generate an overseer including builder pattern and message wrapper from a single annotated struct definition." 
[package.metadata.docs.rs] diff --git a/node/jaeger/Cargo.toml b/node/jaeger/Cargo.toml index 8131d4076674..b563b33842b5 100644 --- a/node/jaeger/Cargo.toml +++ b/node/jaeger/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-node-jaeger" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true description = "Polkadot Jaeger primitives, but equally useful for Grafana/Tempo" [dependencies] diff --git a/node/malus/Cargo.toml b/node/malus/Cargo.toml index 7e0bf0d8dd08..08656ea9f3da 100644 --- a/node/malus/Cargo.toml +++ b/node/malus/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "polkadot-test-malus" description = "Misbehaving nodes for local testnets, system and Simnet tests." -license = "GPL-3.0-only" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true readme = "README.md" publish = false diff --git a/node/metrics/Cargo.toml b/node/metrics/Cargo.toml index 2e9bc22d1cb3..fdb42a1dcde0 100644 --- a/node/metrics/Cargo.toml +++ b/node/metrics/Cargo.toml @@ -4,6 +4,7 @@ description = "Subsystem metric helpers" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] futures = "0.3.21" diff --git a/node/network/approval-distribution/Cargo.toml b/node/network/approval-distribution/Cargo.toml index 34afca6d48f0..bd683b320285 100644 --- a/node/network/approval-distribution/Cargo.toml +++ b/node/network/approval-distribution/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-approval-distribution" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] polkadot-node-metrics = { path = "../../metrics" } diff --git a/node/network/availability-distribution/Cargo.toml b/node/network/availability-distribution/Cargo.toml index ef9ddd12d2dd..823439f1fd65 100644 --- a/node/network/availability-distribution/Cargo.toml +++ b/node/network/availability-distribution/Cargo.toml @@ -3,6 +3,7 @@ 
name = "polkadot-availability-distribution" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] futures = "0.3.21" diff --git a/node/network/availability-recovery/Cargo.toml b/node/network/availability-recovery/Cargo.toml index 2d6a6db0f567..f601b8aedc46 100644 --- a/node/network/availability-recovery/Cargo.toml +++ b/node/network/availability-recovery/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-availability-recovery" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] futures = "0.3.21" diff --git a/node/network/bitfield-distribution/Cargo.toml b/node/network/bitfield-distribution/Cargo.toml index 8f7f00d6a682..6f0f3eea2c9b 100644 --- a/node/network/bitfield-distribution/Cargo.toml +++ b/node/network/bitfield-distribution/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-availability-bitfield-distribution" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] futures = "0.3.21" diff --git a/node/network/bridge/Cargo.toml b/node/network/bridge/Cargo.toml index d45900f06dfb..4f3d6306aa8e 100644 --- a/node/network/bridge/Cargo.toml +++ b/node/network/bridge/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-network-bridge" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] always-assert = "0.1" diff --git a/node/network/collator-protocol/Cargo.toml b/node/network/collator-protocol/Cargo.toml index 25bb4fdb825f..4f6adba5487f 100644 --- a/node/network/collator-protocol/Cargo.toml +++ b/node/network/collator-protocol/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-collator-protocol" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] always-assert = "0.1.2" diff --git a/node/network/dispute-distribution/Cargo.toml b/node/network/dispute-distribution/Cargo.toml index 
c3f18a34531e..dd2b81cb1458 100644 --- a/node/network/dispute-distribution/Cargo.toml +++ b/node/network/dispute-distribution/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-dispute-distribution" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] futures = "0.3.21" diff --git a/node/network/gossip-support/Cargo.toml b/node/network/gossip-support/Cargo.toml index bdc68af4c39e..782213f622cc 100644 --- a/node/network/gossip-support/Cargo.toml +++ b/node/network/gossip-support/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-gossip-support" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/node/network/protocol/Cargo.toml b/node/network/protocol/Cargo.toml index f1a481081200..cfb7a5c2d0f0 100644 --- a/node/network/protocol/Cargo.toml +++ b/node/network/protocol/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-node-network-protocol" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true description = "Primitives types for the Node-side" [dependencies] diff --git a/node/network/statement-distribution/Cargo.toml b/node/network/statement-distribution/Cargo.toml index c338b0276e51..9d2ee1f621f6 100644 --- a/node/network/statement-distribution/Cargo.toml +++ b/node/network/statement-distribution/Cargo.toml @@ -4,6 +4,7 @@ description = "Statement Distribution Subsystem" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] futures = "0.3.21" diff --git a/node/overseer/Cargo.toml b/node/overseer/Cargo.toml index f242aa940448..b0576f5c61ef 100644 --- a/node/overseer/Cargo.toml +++ b/node/overseer/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-overseer" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true 
[dependencies] client = { package = "sc-client-api", git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/node/primitives/Cargo.toml b/node/primitives/Cargo.toml index 1bf9c81b60f9..fbd85c1a0551 100644 --- a/node/primitives/Cargo.toml +++ b/node/primitives/Cargo.toml @@ -4,6 +4,7 @@ description = "Primitives types for the Node-side" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] bounded-vec = "0.7" diff --git a/node/service/Cargo.toml b/node/service/Cargo.toml index d02bee89ffa1..e373dd4f0011 100644 --- a/node/service/Cargo.toml +++ b/node/service/Cargo.toml @@ -4,6 +4,7 @@ rust-version = "1.60" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] # Substrate Client diff --git a/node/subsystem-test-helpers/Cargo.toml b/node/subsystem-test-helpers/Cargo.toml index 41c48d7f31e1..81bc19a13031 100644 --- a/node/subsystem-test-helpers/Cargo.toml +++ b/node/subsystem-test-helpers/Cargo.toml @@ -4,6 +4,7 @@ description = "Subsystem traits and message definitions" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] async-trait = "0.1.57" diff --git a/node/subsystem-types/Cargo.toml b/node/subsystem-types/Cargo.toml index 70fce755f806..d994682110e5 100644 --- a/node/subsystem-types/Cargo.toml +++ b/node/subsystem-types/Cargo.toml @@ -4,6 +4,7 @@ description = "Subsystem traits and message definitions" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] derive_more = "0.99.17" diff --git a/node/subsystem-util/Cargo.toml b/node/subsystem-util/Cargo.toml index ab4f7a5972f7..8c4de01ab314 100644 --- a/node/subsystem-util/Cargo.toml +++ b/node/subsystem-util/Cargo.toml @@ -4,6 +4,7 @@ description = "Subsystem traits and message definitions" version.workspace = true authors.workspace = true 
edition.workspace = true +license.workspace = true [dependencies] async-trait = "0.1.57" diff --git a/node/subsystem/Cargo.toml b/node/subsystem/Cargo.toml index 2d788c10b4c1..368a194091f5 100644 --- a/node/subsystem/Cargo.toml +++ b/node/subsystem/Cargo.toml @@ -4,6 +4,7 @@ description = "Subsystem traits and message definitions and the generated overse version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] polkadot-overseer = { path = "../overseer" } diff --git a/node/test/client/Cargo.toml b/node/test/client/Cargo.toml index 366d244952c1..33c240443d02 100644 --- a/node/test/client/Cargo.toml +++ b/node/test/client/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-test-client" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } diff --git a/node/test/performance-test/Cargo.toml b/node/test/performance-test/Cargo.toml index 0e5800b920e3..c6d0ce7f7ec9 100644 --- a/node/test/performance-test/Cargo.toml +++ b/node/test/performance-test/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-performance-test" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] thiserror = "1.0.31" diff --git a/node/test/service/Cargo.toml b/node/test/service/Cargo.toml index 504ee5beca74..08e9e3889b06 100644 --- a/node/test/service/Cargo.toml +++ b/node/test/service/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-test-service" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] futures = "0.3.21" diff --git a/node/zombienet-backchannel/Cargo.toml b/node/zombienet-backchannel/Cargo.toml index 0b6bf70611eb..1c032cc3f136 100644 --- a/node/zombienet-backchannel/Cargo.toml +++ b/node/zombienet-backchannel/Cargo.toml @@ -1,12 +1,12 @@ [package] name = 
"zombienet-backchannel" description = "Zombienet backchannel to notify test runner and coordinate with malus actors." -license = "GPL-3.0-only" readme = "README.md" publish = false version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] tokio = { version = "1.24.2", default-features = false, features = ["macros", "net", "rt-multi-thread", "sync"] } diff --git a/parachain/Cargo.toml b/parachain/Cargo.toml index 70eafb3b8b31..5d49042bc0cc 100644 --- a/parachain/Cargo.toml +++ b/parachain/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-parachain" description = "Types and utilities for creating and working with parachains" authors.workspace = true edition.workspace = true +license.workspace = true version.workspace = true [dependencies] diff --git a/parachain/test-parachains/Cargo.toml b/parachain/test-parachains/Cargo.toml index b5c1303590f2..a3fa882e1f4c 100644 --- a/parachain/test-parachains/Cargo.toml +++ b/parachain/test-parachains/Cargo.toml @@ -4,6 +4,7 @@ description = "Integration tests using the test-parachains" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true publish = false [dependencies] diff --git a/parachain/test-parachains/adder/Cargo.toml b/parachain/test-parachains/adder/Cargo.toml index 64123d312550..5e1b9a7d174c 100644 --- a/parachain/test-parachains/adder/Cargo.toml +++ b/parachain/test-parachains/adder/Cargo.toml @@ -3,6 +3,7 @@ name = "test-parachain-adder" description = "Test parachain which adds to a number as its state transition" build = "build.rs" edition.workspace = true +license.workspace = true version.workspace = true authors.workspace = true diff --git a/parachain/test-parachains/adder/collator/Cargo.toml b/parachain/test-parachains/adder/collator/Cargo.toml index 29a10069e3e0..fec95a5718a1 100644 --- a/parachain/test-parachains/adder/collator/Cargo.toml +++ b/parachain/test-parachains/adder/collator/Cargo.toml @@ -4,6 +4,7 @@ 
description = "Collator for the adder test parachain" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [[bin]] name = "adder-collator" diff --git a/parachain/test-parachains/halt/Cargo.toml b/parachain/test-parachains/halt/Cargo.toml index a5147bb98fa8..99076aae6aa3 100644 --- a/parachain/test-parachains/halt/Cargo.toml +++ b/parachain/test-parachains/halt/Cargo.toml @@ -5,6 +5,7 @@ build = "build.rs" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] diff --git a/parachain/test-parachains/undying/Cargo.toml b/parachain/test-parachains/undying/Cargo.toml index e2a8448ebf15..43cb1bc37fda 100644 --- a/parachain/test-parachains/undying/Cargo.toml +++ b/parachain/test-parachains/undying/Cargo.toml @@ -5,6 +5,7 @@ build = "build.rs" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] parachain = { package = "polkadot-parachain", path = "../../", default-features = false, features = [ "wasm-api" ] } diff --git a/parachain/test-parachains/undying/collator/Cargo.toml b/parachain/test-parachains/undying/collator/Cargo.toml index f63757a20958..4f1a34f977c8 100644 --- a/parachain/test-parachains/undying/collator/Cargo.toml +++ b/parachain/test-parachains/undying/collator/Cargo.toml @@ -2,6 +2,7 @@ name = "test-parachain-undying-collator" description = "Collator for the undying test parachain" edition.workspace = true +license.workspace = true version.workspace = true authors.workspace = true diff --git a/primitives/Cargo.toml b/primitives/Cargo.toml index 691e987331fa..51c2bf8bea42 100644 --- a/primitives/Cargo.toml +++ b/primitives/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-primitives" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } diff --git 
a/primitives/test-helpers/Cargo.toml b/primitives/test-helpers/Cargo.toml index 917e5f3babdb..a1f7f9268b9f 100644 --- a/primitives/test-helpers/Cargo.toml +++ b/primitives/test-helpers/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-primitives-test-helpers" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 6f1c7bfa81ba..18913718676c 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-rpc" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] jsonrpsee = { version = "0.16.2", features = ["server"] } diff --git a/runtime/common/Cargo.toml b/runtime/common/Cargo.toml index b671a5026fd4..c9812d806733 100644 --- a/runtime/common/Cargo.toml +++ b/runtime/common/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-runtime-common" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] impl-trait-for-tuples = "0.2.2" diff --git a/runtime/common/slot_range_helper/Cargo.toml b/runtime/common/slot_range_helper/Cargo.toml index 3012b64cbd0b..3d48b1c03c53 100644 --- a/runtime/common/slot_range_helper/Cargo.toml +++ b/runtime/common/slot_range_helper/Cargo.toml @@ -3,6 +3,7 @@ name = "slot-range-helper" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] paste = "1.0" diff --git a/runtime/kusama/Cargo.toml b/runtime/kusama/Cargo.toml index 3b8c5cd3bb68..645215bc2c22 100644 --- a/runtime/kusama/Cargo.toml +++ b/runtime/kusama/Cargo.toml @@ -4,6 +4,7 @@ build = "build.rs" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } diff --git 
a/runtime/kusama/constants/Cargo.toml b/runtime/kusama/constants/Cargo.toml index 38f91eccabd1..11ff70cd8c9b 100644 --- a/runtime/kusama/constants/Cargo.toml +++ b/runtime/kusama/constants/Cargo.toml @@ -3,6 +3,7 @@ name = "kusama-runtime-constants" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] smallvec = "1.11.0" diff --git a/runtime/metrics/Cargo.toml b/runtime/metrics/Cargo.toml index ebabb6a832a9..7bbf8d066f05 100644 --- a/runtime/metrics/Cargo.toml +++ b/runtime/metrics/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-runtime-metrics" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] sp-std = { package = "sp-std", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false} diff --git a/runtime/parachains/Cargo.toml b/runtime/parachains/Cargo.toml index dda0c2b38715..c3acdd781ea2 100644 --- a/runtime/parachains/Cargo.toml +++ b/runtime/parachains/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-runtime-parachains" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } diff --git a/runtime/polkadot/Cargo.toml b/runtime/polkadot/Cargo.toml index eac818c70e00..0ab06b8bbb12 100644 --- a/runtime/polkadot/Cargo.toml +++ b/runtime/polkadot/Cargo.toml @@ -4,6 +4,7 @@ build = "build.rs" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } diff --git a/runtime/polkadot/constants/Cargo.toml b/runtime/polkadot/constants/Cargo.toml index 03e5f55f52b1..a10546edfa7b 100644 --- a/runtime/polkadot/constants/Cargo.toml +++ b/runtime/polkadot/constants/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-runtime-constants" version.workspace = true 
authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] smallvec = "1.11.0" diff --git a/runtime/rococo/Cargo.toml b/runtime/rococo/Cargo.toml index 8c2aa82bfbc2..41d25d3aa6f6 100644 --- a/runtime/rococo/Cargo.toml +++ b/runtime/rococo/Cargo.toml @@ -4,6 +4,7 @@ build = "build.rs" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } diff --git a/runtime/rococo/constants/Cargo.toml b/runtime/rococo/constants/Cargo.toml index 9fbd51df534a..f9ea1186c550 100644 --- a/runtime/rococo/constants/Cargo.toml +++ b/runtime/rococo/constants/Cargo.toml @@ -3,6 +3,7 @@ name = "rococo-runtime-constants" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] smallvec = "1.11.0" diff --git a/runtime/test-runtime/Cargo.toml b/runtime/test-runtime/Cargo.toml index b3318f59477e..76bd63d59462 100644 --- a/runtime/test-runtime/Cargo.toml +++ b/runtime/test-runtime/Cargo.toml @@ -4,6 +4,7 @@ build = "build.rs" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } diff --git a/runtime/test-runtime/constants/Cargo.toml b/runtime/test-runtime/constants/Cargo.toml index 30f5c68589ef..9b435da80682 100644 --- a/runtime/test-runtime/constants/Cargo.toml +++ b/runtime/test-runtime/constants/Cargo.toml @@ -3,6 +3,7 @@ name = "test-runtime-constants" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] smallvec = "1.11.0" diff --git a/runtime/westend/Cargo.toml b/runtime/westend/Cargo.toml index 29a71f4d9985..4773176e1762 100644 --- a/runtime/westend/Cargo.toml +++ b/runtime/westend/Cargo.toml @@ -4,6 +4,7 @@ build = "build.rs" 
version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } diff --git a/runtime/westend/constants/Cargo.toml b/runtime/westend/constants/Cargo.toml index 42fdd796b0f8..e5d9900e22e2 100644 --- a/runtime/westend/constants/Cargo.toml +++ b/runtime/westend/constants/Cargo.toml @@ -3,6 +3,7 @@ name = "westend-runtime-constants" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] smallvec = "1.11.0" diff --git a/statement-table/Cargo.toml b/statement-table/Cargo.toml index 04a309685b6b..6c1a3d143454 100644 --- a/statement-table/Cargo.toml +++ b/statement-table/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-statement-table" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } diff --git a/utils/generate-bags/Cargo.toml b/utils/generate-bags/Cargo.toml index 2038dc94a1ea..1a0f1d3fbfcf 100644 --- a/utils/generate-bags/Cargo.toml +++ b/utils/generate-bags/Cargo.toml @@ -3,6 +3,7 @@ name = "polkadot-voter-bags" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] clap = { version = "4.0.9", features = ["derive"] } diff --git a/utils/remote-ext-tests/bags-list/Cargo.toml b/utils/remote-ext-tests/bags-list/Cargo.toml index 7eb945ef65b2..772efb1eddd0 100644 --- a/utils/remote-ext-tests/bags-list/Cargo.toml +++ b/utils/remote-ext-tests/bags-list/Cargo.toml @@ -3,6 +3,7 @@ name = "remote-ext-tests-bags-list" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] polkadot-runtime = { path = "../../../runtime/polkadot" } diff --git a/utils/staking-miner/Cargo.toml b/utils/staking-miner/Cargo.toml index 
8d951503408d..05b0ddefaee9 100644 --- a/utils/staking-miner/Cargo.toml +++ b/utils/staking-miner/Cargo.toml @@ -7,6 +7,7 @@ name = "staking-miner" version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true publish = false [dependencies] diff --git a/xcm/Cargo.toml b/xcm/Cargo.toml index ca407dc908fb..a03d392d5fd0 100644 --- a/xcm/Cargo.toml +++ b/xcm/Cargo.toml @@ -4,6 +4,7 @@ description = "The basic XCM datastructures." version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] bounded-collections = { version = "0.1.8", default-features = false, features = ["serde"] } diff --git a/xcm/pallet-xcm-benchmarks/Cargo.toml b/xcm/pallet-xcm-benchmarks/Cargo.toml index a74c8baac49a..a77b58059b42 100644 --- a/xcm/pallet-xcm-benchmarks/Cargo.toml +++ b/xcm/pallet-xcm-benchmarks/Cargo.toml @@ -2,6 +2,7 @@ name = "pallet-xcm-benchmarks" authors.workspace = true edition.workspace = true +license.workspace = true version.workspace = true [package.metadata.docs.rs] diff --git a/xcm/pallet-xcm/Cargo.toml b/xcm/pallet-xcm/Cargo.toml index 849d1f02ad20..7d5d278b0e0a 100644 --- a/xcm/pallet-xcm/Cargo.toml +++ b/xcm/pallet-xcm/Cargo.toml @@ -2,6 +2,7 @@ name = "pallet-xcm" authors.workspace = true edition.workspace = true +license.workspace = true version.workspace = true diff --git a/xcm/procedural/Cargo.toml b/xcm/procedural/Cargo.toml index 6b7fc95204cc..b60c8eed6151 100644 --- a/xcm/procedural/Cargo.toml +++ b/xcm/procedural/Cargo.toml @@ -2,6 +2,7 @@ name = "xcm-procedural" authors.workspace = true edition.workspace = true +license.workspace = true version.workspace = true [lib] diff --git a/xcm/xcm-builder/Cargo.toml b/xcm/xcm-builder/Cargo.toml index 7dc7a7fc0aa2..fec354d0caea 100644 --- a/xcm/xcm-builder/Cargo.toml +++ b/xcm/xcm-builder/Cargo.toml @@ -3,6 +3,7 @@ name = "xcm-builder" description = "Tools & types for building with XCM and its executor." 
authors.workspace = true edition.workspace = true +license.workspace = true version.workspace = true [dependencies] diff --git a/xcm/xcm-executor/Cargo.toml b/xcm/xcm-executor/Cargo.toml index 4e6c63a19081..92e6dc95442f 100644 --- a/xcm/xcm-executor/Cargo.toml +++ b/xcm/xcm-executor/Cargo.toml @@ -3,6 +3,7 @@ name = "xcm-executor" description = "An abstract and configurable XCM message executor." authors.workspace = true edition.workspace = true +license.workspace = true version.workspace = true [dependencies] diff --git a/xcm/xcm-executor/integration-tests/Cargo.toml b/xcm/xcm-executor/integration-tests/Cargo.toml index 4fff3952a05d..d2af1304beb6 100644 --- a/xcm/xcm-executor/integration-tests/Cargo.toml +++ b/xcm/xcm-executor/integration-tests/Cargo.toml @@ -3,6 +3,7 @@ name = "xcm-executor-integration-tests" description = "Integration tests for the XCM Executor" authors.workspace = true edition.workspace = true +license.workspace = true version.workspace = true [dependencies] diff --git a/xcm/xcm-simulator/Cargo.toml b/xcm/xcm-simulator/Cargo.toml index bb23b8b092a4..79ee27c6041c 100644 --- a/xcm/xcm-simulator/Cargo.toml +++ b/xcm/xcm-simulator/Cargo.toml @@ -4,6 +4,7 @@ description = "Test kit to simulate cross-chain message passing and XCM executio version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } diff --git a/xcm/xcm-simulator/example/Cargo.toml b/xcm/xcm-simulator/example/Cargo.toml index 0bf0654cc4ed..7d47755dc488 100644 --- a/xcm/xcm-simulator/example/Cargo.toml +++ b/xcm/xcm-simulator/example/Cargo.toml @@ -3,6 +3,7 @@ name = "xcm-simulator-example" description = "Examples of xcm-simulator usage." 
authors.workspace = true edition.workspace = true +license.workspace = true version.workspace = true [dependencies] diff --git a/xcm/xcm-simulator/fuzzer/Cargo.toml b/xcm/xcm-simulator/fuzzer/Cargo.toml index 1c36a1595ec0..ce0528adf1f5 100644 --- a/xcm/xcm-simulator/fuzzer/Cargo.toml +++ b/xcm/xcm-simulator/fuzzer/Cargo.toml @@ -4,6 +4,7 @@ description = "Examples of xcm-simulator usage." version.workspace = true authors.workspace = true edition.workspace = true +license.workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } From 62b489fe284229ddbff75c6914968c65dab9ea87 Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Sat, 5 Aug 2023 18:09:21 +0200 Subject: [PATCH 16/35] Remove xcm on_runtime_upgrade pallet hook (#7235) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * move migration stuffs * Apply suggestions from code review Co-authored-by: Bastian Köcher * Fix test * fix * lint * fix lint * rm extra space * fix lint * PR review * fixes * use saturating_accrue in fn * fix test --------- Co-authored-by: Bastian Köcher --- xcm/pallet-xcm/src/lib.rs | 6 ----- xcm/pallet-xcm/src/migration.rs | 43 +++++++++++++++------------------ xcm/pallet-xcm/src/tests.rs | 13 +++++----- 3 files changed, 27 insertions(+), 35 deletions(-) diff --git a/xcm/pallet-xcm/src/lib.rs b/xcm/pallet-xcm/src/lib.rs index 990871b5c483..d52d5ba24271 100644 --- a/xcm/pallet-xcm/src/lib.rs +++ b/xcm/pallet-xcm/src/lib.rs @@ -689,12 +689,6 @@ pub mod pallet { } weight_used } - fn on_runtime_upgrade() -> Weight { - // Start a migration (this happens before on_initialize so it'll happen later in this - // block, which should be good enough)... 
- CurrentMigration::::put(VersionMigrationStage::default()); - T::DbWeight::get().writes(1) - } } pub mod migrations { diff --git a/xcm/pallet-xcm/src/migration.rs b/xcm/pallet-xcm/src/migration.rs index 1f1dac1c9e81..08809f0d2f2e 100644 --- a/xcm/pallet-xcm/src/migration.rs +++ b/xcm/pallet-xcm/src/migration.rs @@ -25,6 +25,7 @@ const DEFAULT_PROOF_SIZE: u64 = 64 * 1024; pub mod v1 { use super::*; + use crate::{CurrentMigration, VersionMigrationStage}; /// Named with the 'VersionUnchecked'-prefix because although this implements some version /// checking, the version checking is not complete as it will begin failing after the upgrade is @@ -33,34 +34,30 @@ pub mod v1 { /// Use experimental [`VersionCheckedMigrateToV1`] instead. pub struct VersionUncheckedMigrateToV1(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for VersionUncheckedMigrateToV1 { - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { - ensure!(StorageVersion::get::>() == 0, "must upgrade linearly"); - - Ok(sp_std::vec::Vec::new()) - } - fn on_runtime_upgrade() -> Weight { - if StorageVersion::get::>() == 0 { - let mut weight = T::DbWeight::get().reads(1); + let mut weight = T::DbWeight::get().reads(1); + + if StorageVersion::get::>() != 0 { + log::warn!("skipping v1, should be removed"); + return weight + } - let translate = |pre: (u64, u64, u32)| -> Option<(u64, Weight, u32)> { - weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); - let translated = (pre.0, Weight::from_parts(pre.1, DEFAULT_PROOF_SIZE), pre.2); - log::info!("Migrated VersionNotifyTarget {:?} to {:?}", pre, translated); - Some(translated) - }; + weight.saturating_accrue(T::DbWeight::get().writes(1)); + CurrentMigration::::put(VersionMigrationStage::default()); - VersionNotifyTargets::::translate_values(translate); + let translate = |pre: (u64, u64, u32)| -> Option<(u64, Weight, u32)> { + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + let translated 
= (pre.0, Weight::from_parts(pre.1, DEFAULT_PROOF_SIZE), pre.2); + log::info!("Migrated VersionNotifyTarget {:?} to {:?}", pre, translated); + Some(translated) + }; - log::info!("v1 applied successfully"); - StorageVersion::new(1).put::>(); + VersionNotifyTargets::::translate_values(translate); - weight.saturating_add(T::DbWeight::get().writes(1)) - } else { - log::warn!("skipping v1, should be removed"); - T::DbWeight::get().reads(1) - } + log::info!("v1 applied successfully"); + weight.saturating_accrue(T::DbWeight::get().writes(1)); + StorageVersion::new(1).put::>(); + weight } } diff --git a/xcm/pallet-xcm/src/tests.rs b/xcm/pallet-xcm/src/tests.rs index 2ad13dced936..f42eb987876a 100644 --- a/xcm/pallet-xcm/src/tests.rs +++ b/xcm/pallet-xcm/src/tests.rs @@ -16,7 +16,8 @@ use crate::{ mock::*, AssetTraps, CurrentMigration, Error, LatestVersionedMultiLocation, Queries, - QueryStatus, VersionDiscoveryQueue, VersionNotifiers, VersionNotifyTargets, + QueryStatus, VersionDiscoveryQueue, VersionMigrationStage, VersionNotifiers, + VersionNotifyTargets, }; use frame_support::{ assert_noop, assert_ok, @@ -897,7 +898,7 @@ fn subscription_side_works() { assert_eq!(take_sent_xcm(), vec![(remote.clone(), Xcm(vec![instr]))]); // A runtime upgrade which doesn't alter the version sends no notifications. - XcmPallet::on_runtime_upgrade(); + CurrentMigration::::put(VersionMigrationStage::default()); XcmPallet::on_initialize(1); assert_eq!(take_sent_xcm(), vec![]); @@ -905,7 +906,7 @@ fn subscription_side_works() { AdvertisedXcmVersion::set(2); // A runtime upgrade which alters the version does send notifications. - XcmPallet::on_runtime_upgrade(); + CurrentMigration::::put(VersionMigrationStage::default()); XcmPallet::on_initialize(2); let instr = QueryResponse { query_id: 0, @@ -932,7 +933,7 @@ fn subscription_side_upgrades_work_with_notify() { AdvertisedXcmVersion::set(3); // A runtime upgrade which alters the version does send notifications. 
- XcmPallet::on_runtime_upgrade(); + CurrentMigration::::put(VersionMigrationStage::default()); XcmPallet::on_initialize(1); let instr1 = QueryResponse { @@ -982,7 +983,7 @@ fn subscription_side_upgrades_work_without_notify() { VersionNotifyTargets::::insert(3, v3_location, (72, Weight::zero(), 2)); // A runtime upgrade which alters the version does send notifications. - XcmPallet::on_runtime_upgrade(); + CurrentMigration::::put(VersionMigrationStage::default()); XcmPallet::on_initialize(1); let mut contents = VersionNotifyTargets::::iter().collect::>(); @@ -1166,7 +1167,7 @@ fn subscription_side_upgrades_work_with_multistage_notify() { AdvertisedXcmVersion::set(3); // A runtime upgrade which alters the version does send notifications. - XcmPallet::on_runtime_upgrade(); + CurrentMigration::::put(VersionMigrationStage::default()); let mut maybe_migration = CurrentMigration::::take(); let mut counter = 0; while let Some(migration) = maybe_migration.take() { From 14e660500c6e5fd94475d65f4673978f586fa09f Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Mon, 7 Aug 2023 09:13:36 -0700 Subject: [PATCH 17/35] Document non-uniqueness of SetTopic IDs (#7579) * Document non-uniqueness of SetTopic IDs * More comments on WithUniqueTopic --- xcm/src/v3/mod.rs | 4 ++++ xcm/xcm-builder/src/barriers.rs | 3 +++ xcm/xcm-builder/src/routing.rs | 5 +++++ 3 files changed, 12 insertions(+) diff --git a/xcm/src/v3/mod.rs b/xcm/src/v3/mod.rs index b1d134ae082c..772ad48ac4b2 100644 --- a/xcm/src/v3/mod.rs +++ b/xcm/src/v3/mod.rs @@ -981,6 +981,10 @@ pub enum Instruction { /// Set the Topic Register. /// + /// The 32-byte array identifier in the parameter is not guaranteed to be + /// unique; if such a property is desired, it is up to the code author to + /// enforce uniqueness. + /// /// Safety: No concerns. 
/// /// Kind: *Instruction* diff --git a/xcm/xcm-builder/src/barriers.rs b/xcm/xcm-builder/src/barriers.rs index 13c1caca5ff6..6996c7145528 100644 --- a/xcm/xcm-builder/src/barriers.rs +++ b/xcm/xcm-builder/src/barriers.rs @@ -207,6 +207,9 @@ impl< /// Sets the message ID to `t` using a `SetTopic(t)` in the last position if present. /// +/// Note that the message ID does not necessarily have to be unique; it is the +/// sender's responsibility to ensure uniqueness. +/// /// Requires some inner barrier to pass on the rest of the message. pub struct TrailingSetTopicAsId(PhantomData); impl ShouldExecute for TrailingSetTopicAsId { diff --git a/xcm/xcm-builder/src/routing.rs b/xcm/xcm-builder/src/routing.rs index c46e0ce78569..39e9eab410bf 100644 --- a/xcm/xcm-builder/src/routing.rs +++ b/xcm/xcm-builder/src/routing.rs @@ -25,6 +25,11 @@ use xcm::prelude::*; /// appends one to the message filled with a universally unique ID. This ID is returned from a /// successful `deliver`. /// +/// If the message does already end with a `SetTopic` instruction, then it is the responsibility +/// of the code author to ensure that the ID supplied to `SetTopic` is universally unique. Due to +/// this property, consumers of the topic ID must be aware that a user-supplied ID may not be +/// unique. +/// /// This is designed to be at the top-level of any routers, since it will always mutate the /// passed `message` reference into a `None`. Don't try to combine it within a tuple except as the /// last element. 
From 929e2d48c03c69ccd6ff7db20ba4e641dd966bf7 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Tue, 8 Aug 2023 09:51:40 -0400 Subject: [PATCH 18/35] PVF: Add missing crate descriptions (#7587) --- node/core/candidate-validation/Cargo.toml | 1 + node/core/pvf-checker/Cargo.toml | 1 + node/core/pvf/Cargo.toml | 1 + node/core/pvf/common/Cargo.toml | 1 + node/core/pvf/common/src/lib.rs | 2 +- node/core/pvf/execute-worker/Cargo.toml | 1 + node/core/pvf/execute-worker/src/lib.rs | 2 ++ node/core/pvf/prepare-worker/Cargo.toml | 1 + node/core/pvf/prepare-worker/src/lib.rs | 2 ++ node/core/pvf/src/lib.rs | 2 +- 10 files changed, 12 insertions(+), 2 deletions(-) diff --git a/node/core/candidate-validation/Cargo.toml b/node/core/candidate-validation/Cargo.toml index ba40fea8140b..0401c892d426 100644 --- a/node/core/candidate-validation/Cargo.toml +++ b/node/core/candidate-validation/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "polkadot-node-core-candidate-validation" +description = "Polkadot crate that implements the Candidate Validation subsystem. Handles requests to validate candidates according to a PVF." version.workspace = true authors.workspace = true edition.workspace = true diff --git a/node/core/pvf-checker/Cargo.toml b/node/core/pvf-checker/Cargo.toml index ee7001524265..2b6b53be4072 100644 --- a/node/core/pvf-checker/Cargo.toml +++ b/node/core/pvf-checker/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "polkadot-node-core-pvf-checker" +description = "Polkadot crate that implements the PVF pre-checking subsystem. Responsible for checking and voting for PVFs that are pending approval." version.workspace = true authors.workspace = true edition.workspace = true diff --git a/node/core/pvf/Cargo.toml b/node/core/pvf/Cargo.toml index d6e9ef576628..02a56ed9d2df 100644 --- a/node/core/pvf/Cargo.toml +++ b/node/core/pvf/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "polkadot-node-core-pvf" +description = "Polkadot crate that implements the PVF validation host. 
Responsible for coordinating preparation and execution of PVFs." version.workspace = true authors.workspace = true edition.workspace = true diff --git a/node/core/pvf/common/Cargo.toml b/node/core/pvf/common/Cargo.toml index 3e674422f812..a091f8f75806 100644 --- a/node/core/pvf/common/Cargo.toml +++ b/node/core/pvf/common/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "polkadot-node-core-pvf-common" +description = "Polkadot crate that contains functionality related to PVFs that is shared by the PVF host and the PVF workers." version.workspace = true authors.workspace = true edition.workspace = true diff --git a/node/core/pvf/common/src/lib.rs b/node/core/pvf/common/src/lib.rs index e5737a66aaec..7e0cab45b671 100644 --- a/node/core/pvf/common/src/lib.rs +++ b/node/core/pvf/common/src/lib.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Functionality that is shared by the host and the workers. +//! Contains functionality related to PVFs that is shared by the PVF host and the PVF workers. pub mod error; pub mod execute; diff --git a/node/core/pvf/execute-worker/Cargo.toml b/node/core/pvf/execute-worker/Cargo.toml index 1c9e2d1f2784..931ea6951a68 100644 --- a/node/core/pvf/execute-worker/Cargo.toml +++ b/node/core/pvf/execute-worker/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "polkadot-node-core-pvf-execute-worker" +description = "Polkadot crate that contains the logic for executing PVFs. Used by the polkadot-execute-worker binary." version.workspace = true authors.workspace = true edition.workspace = true diff --git a/node/core/pvf/execute-worker/src/lib.rs b/node/core/pvf/execute-worker/src/lib.rs index d90cac2522fd..c6ee515f9093 100644 --- a/node/core/pvf/execute-worker/src/lib.rs +++ b/node/core/pvf/execute-worker/src/lib.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . +//! 
Contains the logic for executing PVFs. Used by the polkadot-execute-worker binary. + pub use polkadot_node_core_pvf_common::executor_intf::Executor; // NOTE: Initializing logging in e.g. tests will not have an effect in the workers, as they are diff --git a/node/core/pvf/prepare-worker/Cargo.toml b/node/core/pvf/prepare-worker/Cargo.toml index ec5b1e9c2399..83ed6f387bae 100644 --- a/node/core/pvf/prepare-worker/Cargo.toml +++ b/node/core/pvf/prepare-worker/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "polkadot-node-core-pvf-prepare-worker" +description = "Polkadot crate that contains the logic for preparing PVFs. Used by the polkadot-prepare-worker binary." version.workspace = true authors.workspace = true edition.workspace = true diff --git a/node/core/pvf/prepare-worker/src/lib.rs b/node/core/pvf/prepare-worker/src/lib.rs index 2df6b1ee6b3e..c9d258625df9 100644 --- a/node/core/pvf/prepare-worker/src/lib.rs +++ b/node/core/pvf/prepare-worker/src/lib.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . +//! Contains the logic for preparing PVFs. Used by the polkadot-prepare-worker binary. + mod executor_intf; mod memory_stats; diff --git a/node/core/pvf/src/lib.rs b/node/core/pvf/src/lib.rs index 772c0b430c1b..2ed3f5242ded 100644 --- a/node/core/pvf/src/lib.rs +++ b/node/core/pvf/src/lib.rs @@ -16,7 +16,7 @@ #![warn(missing_docs)] -//! A crate that implements the PVF validation host. +//! The PVF validation host. Responsible for coordinating preparation and execution of PVFs. //! //! For more background, refer to the Implementer's Guide: [PVF //! 
Pre-checking](https://paritytech.github.io/polkadot/book/pvf-prechecking.html) and [Candidate From 06ccdf2bd8a2f8fbda81e33638d8245906212a23 Mon Sep 17 00:00:00 2001 From: Xiliang Chen Date: Wed, 9 Aug 2023 02:16:42 +1200 Subject: [PATCH 19/35] update weight file template (#7589) --- xcm/pallet-xcm-benchmarks/template.hbs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xcm/pallet-xcm-benchmarks/template.hbs b/xcm/pallet-xcm-benchmarks/template.hbs index a0ba5173258f..81cdf62812b6 100644 --- a/xcm/pallet-xcm-benchmarks/template.hbs +++ b/xcm/pallet-xcm-benchmarks/template.hbs @@ -5,7 +5,7 @@ //! DATE: {{date}}, STEPS: `{{cmd.steps}}`, REPEAT: `{{cmd.repeat}}`, LOW RANGE: `{{cmd.lowest_range_values}}`, HIGH RANGE: `{{cmd.highest_range_values}}` //! WORST CASE MAP SIZE: `{{cmd.worst_case_map_values}}` //! HOSTNAME: `{{hostname}}`, CPU: `{{cpuname}}` -//! EXECUTION: {{cmd.execution}}, WASM-EXECUTION: {{cmd.wasm_execution}}, CHAIN: {{cmd.chain}}, DB CACHE: {{cmd.db_cache}} +//! 
WASM-EXECUTION: {{cmd.wasm_execution}}, CHAIN: {{cmd.chain}}, DB CACHE: {{cmd.db_cache}} // Executed Command: {{#each args as |arg|}} From aaea117ad427ee07fc9c0c95fc28e9d4772f3e41 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Wed, 9 Aug 2023 18:28:48 +0200 Subject: [PATCH 20/35] Companion for #14412 (#7547) * Companion for 14412 * update lockfile for {"substrate"} * Trigger CI --------- Co-authored-by: parity-processbot <> --- Cargo.lock | 652 +++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 452 insertions(+), 200 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 297c85c67093..69571875070f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -246,6 +246,164 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29d47fbf90d5149a107494b15a7dc8d69b351be2db3bb9691740e88ec17fd880" +[[package]] +name = "ark-bls12-381" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c775f0d12169cba7aae4caeb547bb6a50781c7449a8aa53793827c9ec4abf488" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-serialize", + "ark-std", +] + +[[package]] +name = "ark-ec" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "defd9a439d56ac24968cca0571f598a61bc8c55f71d50a89cda591cb750670ba" +dependencies = [ + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "derivative", + "hashbrown 0.13.2", + "itertools", + "num-traits", + "zeroize", +] + +[[package]] +name = "ark-ed-on-bls12-381-bandersnatch" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9cde0f2aa063a2a5c28d39b47761aa102bda7c13c84fc118a61b87c7b2f785c" +dependencies = [ + "ark-bls12-381", + "ark-ec", + "ark-ff", + "ark-std", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = 
[ + "ark-ff-asm", + "ark-ff-macros", + "ark-serialize", + "ark-std", + "derivative", + "digest 0.10.7", + "itertools", + "num-bigint", + "num-traits", + "paste", + "rustc_version", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-poly" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d320bfc44ee185d899ccbadfa8bc31aab923ce1558716e1997a1e74057fe86bf" +dependencies = [ + "ark-ff", + "ark-serialize", + "ark-std", + "derivative", + "hashbrown 0.13.2", +] + +[[package]] +name = "ark-secret-scalar" +version = "0.0.2" +source = "git+https://github.com/w3f/ring-vrf?rev=c86ebd4#c86ebd4114d3165d05f9ce28c1d9e8d7a9a4e801" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-serialize", + "ark-std", + "ark-transcript", + "digest 0.10.7", + "rand_core 0.6.4", + "zeroize", +] + +[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-serialize-derive", + "ark-std", + "digest 0.10.7", + "num-bigint", +] + +[[package]] +name = "ark-serialize-derive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "ark-transcript" +version = "0.0.2" +source = "git+https://github.com/w3f/ring-vrf?rev=c86ebd4#c86ebd4114d3165d05f9ce28c1d9e8d7a9a4e801" +dependencies = [ + "ark-ff", + "ark-serialize", + "ark-std", + "digest 0.10.7", + "rand_core 0.6.4", + "sha3", +] + [[package]] name = "array-bytes" version = "6.1.0" @@ -396,6 +554,27 @@ dependencies = [ "rustc-demangle", ] +[[package]] +name = "bandersnatch_vrfs" +version = "0.0.1" +source = "git+https://github.com/w3f/ring-vrf?rev=c86ebd4#c86ebd4114d3165d05f9ce28c1d9e8d7a9a4e801" +dependencies = [ + "ark-bls12-381", + "ark-ec", + "ark-ed-on-bls12-381-bandersnatch", + "ark-ff", + "ark-serialize", + "ark-std", + "dleq_vrf", + "fflonk", + "merlin 3.0.0", + "rand_chacha 0.3.1", + "rand_core 0.6.4", + "ring 0.1.0", + "sha2 0.10.7", + "zeroize", +] + [[package]] name = "base-x" version = "0.2.8" @@ -438,7 +617,7 @@ dependencies = [ [[package]] name = "binary-merkle-tree" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "hash-db", "log", @@ -1012,6 +1191,20 @@ dependencies = [ "unicode-width", ] +[[package]] +name = "common" +version = "0.1.0" +source = "git+https://github.com/w3f/ring-proof#0e948f3c28cbacecdd3020403c4841c0eb339213" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "fflonk", + "merlin 3.0.0", +] + [[package]] name = "common-path" version = "1.0.0" @@ -1685,6 +1878,22 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "31ad93652f40969dead8d4bf897a41e9462095152eb21c56e5830537e41179dd" +[[package]] +name = "dleq_vrf" 
+version = "0.0.2" +source = "git+https://github.com/w3f/ring-vrf?rev=c86ebd4#c86ebd4114d3165d05f9ce28c1d9e8d7a9a4e801" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-secret-scalar", + "ark-serialize", + "ark-std", + "ark-transcript", + "arrayvec 0.7.4", + "rand_core 0.6.4", + "zeroize", +] + [[package]] name = "dlmalloc" version = "0.2.4" @@ -1702,18 +1911,18 @@ checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" [[package]] name = "docify" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6491709f76fb7ceb951244daf624d480198b427556084391d6e3c33d3ae74b9" +checksum = "029de870d175d11969524d91a3fb2cbf6d488b853bff99d41cf65e533ac7d9d2" dependencies = [ "docify_macros", ] [[package]] name = "docify_macros" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc5338a9f72ce29a81377d9039798fcc926fb471b2004666caf48e446dffbbf" +checksum = "cac43324656a1b05eb0186deb51f27d2d891c704c37f34de281ef6297ba193e5" dependencies = [ "common-path", "derive-syn-parse", @@ -1723,6 +1932,7 @@ dependencies = [ "regex", "syn 2.0.20", "termcolor", + "toml 0.7.3", "walkdir", ] @@ -2131,6 +2341,19 @@ dependencies = [ "subtle", ] +[[package]] +name = "fflonk" +version = "0.1.0" +source = "git+https://github.com/w3f/fflonk#26a5045b24e169cffc1f9328ca83d71061145c40" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "merlin 3.0.0", +] + [[package]] name = "fiat-crypto" version = "0.1.20" @@ -2223,7 +2446,7 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", ] @@ -2246,7 
+2469,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frame-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-support", "frame-support-procedural", @@ -2271,7 +2494,7 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "Inflector", "array-bytes", @@ -2319,7 +2542,7 @@ dependencies = [ [[package]] name = "frame-election-provider-solution-type" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2330,7 +2553,7 @@ dependencies = [ [[package]] name = "frame-election-provider-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-election-provider-solution-type", "frame-support", @@ -2347,7 +2570,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-support", "frame-system", @@ -2376,7 
+2599,7 @@ dependencies = [ [[package]] name = "frame-remote-externalities" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-recursion", "futures", @@ -2397,7 +2620,7 @@ dependencies = [ [[package]] name = "frame-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "aquamarine", "bitflags", @@ -2434,7 +2657,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "Inflector", "cfg-expr", @@ -2452,7 +2675,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", @@ -2464,7 +2687,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "proc-macro2", "quote", @@ -2474,7 +2697,7 @@ dependencies = [ [[package]] name = "frame-support-test" version = "3.0.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-executive", @@ -2501,7 +2724,7 @@ dependencies = [ [[package]] name = "frame-support-test-pallet" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-support", "frame-system", @@ -2514,7 +2737,7 @@ dependencies = [ [[package]] name = "frame-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "cfg-if", "frame-support", @@ -2533,7 +2756,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -2548,7 +2771,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", "sp-api", @@ -2557,7 +2780,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source 
= "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-support", "parity-scale-codec", @@ -2739,7 +2962,7 @@ dependencies = [ [[package]] name = "generate-bags" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "chrono", "frame-election-provider-support", @@ -4548,6 +4771,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "merlin" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58c38e2799fc0978b65dfff8023ec7843e2330bb462f19198840b34b6582397d" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.6.4", + "zeroize", +] + [[package]] name = "mick-jaeger" version = "0.1.8" @@ -4594,7 +4829,7 @@ dependencies = [ [[package]] name = "mmr-gadget" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "futures", "log", @@ -4613,7 +4848,7 @@ dependencies = [ [[package]] name = "mmr-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "anyhow", "jsonrpsee", @@ -5139,7 +5374,7 @@ dependencies = [ [[package]] name = "pallet-assets" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ 
-5154,7 +5389,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-support", "frame-system", @@ -5170,7 +5405,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-support", "frame-system", @@ -5184,7 +5419,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5208,7 +5443,7 @@ dependencies = [ [[package]] name = "pallet-bags-list" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5228,7 +5463,7 @@ dependencies = [ [[package]] name = "pallet-bags-list-remote-tests" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-election-provider-support", "frame-remote-externalities", @@ -5247,7 +5482,7 @@ dependencies = [ [[package]] name = 
"pallet-balances" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5262,7 +5497,7 @@ dependencies = [ [[package]] name = "pallet-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-support", "frame-system", @@ -5281,7 +5516,7 @@ dependencies = [ [[package]] name = "pallet-beefy-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "array-bytes", "binary-merkle-tree", @@ -5305,7 +5540,7 @@ dependencies = [ [[package]] name = "pallet-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5323,7 +5558,7 @@ dependencies = [ [[package]] name = "pallet-child-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5342,7 +5577,7 @@ dependencies = [ [[package]] name = "pallet-collective" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5359,7 +5594,7 @@ dependencies = [ [[package]] name = "pallet-conviction-voting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "assert_matches", "frame-benchmarking", @@ -5376,7 +5611,7 @@ dependencies = [ [[package]] name = "pallet-democracy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5394,7 +5629,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-multi-phase" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5417,7 +5652,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-support-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5430,7 +5665,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" version = "5.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5449,7 +5684,7 @@ dependencies = [ [[package]] name = "pallet-fast-unstake" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "docify", "frame-benchmarking", @@ -5468,7 +5703,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5491,7 +5726,7 @@ dependencies = [ [[package]] name = "pallet-identity" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "enumflags2", "frame-benchmarking", @@ -5507,7 +5742,7 @@ dependencies = [ [[package]] name = "pallet-im-online" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5527,7 +5762,7 @@ dependencies = [ [[package]] name = "pallet-indices" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5544,7 +5779,7 @@ dependencies = [ [[package]] name = "pallet-membership" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5561,7 +5796,7 @@ dependencies = [ [[package]] name = "pallet-message-queue" version = "7.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5580,7 +5815,7 @@ dependencies = [ [[package]] name = "pallet-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5597,7 +5832,7 @@ dependencies = [ [[package]] name = "pallet-multisig" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5613,7 +5848,7 @@ dependencies = [ [[package]] name = "pallet-nis" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ 
"frame-benchmarking", "frame-support", @@ -5629,7 +5864,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-support", "frame-system", @@ -5648,7 +5883,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-benchmarking" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5668,7 +5903,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-runtime-api" version = "1.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "pallet-nomination-pools", "parity-scale-codec", @@ -5679,7 +5914,7 @@ dependencies = [ [[package]] name = "pallet-offences" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-support", "frame-system", @@ -5696,7 +5931,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", 
"frame-election-provider-support", @@ -5720,7 +5955,7 @@ dependencies = [ [[package]] name = "pallet-preimage" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5737,7 +5972,7 @@ dependencies = [ [[package]] name = "pallet-proxy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5752,7 +5987,7 @@ dependencies = [ [[package]] name = "pallet-ranked-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5770,7 +6005,7 @@ dependencies = [ [[package]] name = "pallet-recovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5785,7 +6020,7 @@ dependencies = [ [[package]] name = "pallet-referenda" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "assert_matches", "frame-benchmarking", @@ -5804,7 +6039,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" 
version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5821,7 +6056,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-support", "frame-system", @@ -5842,7 +6077,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5858,12 +6093,11 @@ dependencies = [ [[package]] name = "pallet-society" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "hex-literal 0.3.4", "log", "parity-scale-codec", "rand_chacha 0.2.2", @@ -5877,7 +6111,7 @@ dependencies = [ [[package]] name = "pallet-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5900,7 +6134,7 @@ dependencies = [ [[package]] name = 
"pallet-staking-reward-curve" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -5911,7 +6145,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-fn" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "log", "sp-arithmetic", @@ -5920,7 +6154,7 @@ dependencies = [ [[package]] name = "pallet-staking-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", "sp-api", @@ -5929,7 +6163,7 @@ dependencies = [ [[package]] name = "pallet-state-trie-migration" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5946,7 +6180,7 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5961,7 +6195,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5979,7 +6213,7 @@ dependencies = [ [[package]] name = "pallet-tips" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5998,7 +6232,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-support", "frame-system", @@ -6014,7 +6248,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", @@ -6030,7 +6264,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -6042,7 +6276,7 @@ dependencies = [ [[package]] name = "pallet-treasury" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -6059,7 +6293,7 @@ dependencies = [ [[package]] name = "pallet-uniques" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -6074,7 +6308,7 @@ dependencies = [ [[package]] name = "pallet-utility" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -6090,7 +6324,7 @@ dependencies = [ [[package]] name = "pallet-vesting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -6105,7 +6339,7 @@ dependencies = [ [[package]] name = "pallet-whitelist" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -6765,7 +6999,7 @@ dependencies = [ "kvdb", "kvdb-memorydb", "lru 0.11.0", - "merlin", + "merlin 2.0.1", "parity-scale-codec", "parking_lot 0.12.1", "polkadot-node-jaeger", @@ -8766,6 +9000,21 @@ dependencies = [ "subtle", ] 
+[[package]] +name = "ring" +version = "0.1.0" +source = "git+https://github.com/w3f/ring-proof#0e948f3c28cbacecdd3020403c4841c0eb339213" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "common", + "fflonk", + "merlin 3.0.0", +] + [[package]] name = "ring" version = "0.16.20" @@ -9014,7 +9263,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "539a2bfe908f471bfa933876bd1eb6a19cf2176d375f82ef7f99530a40e48c2c" dependencies = [ "log", - "ring", + "ring 0.16.20", "sct", "webpki", ] @@ -9026,7 +9275,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e32ca28af694bc1bbf399c33a516dbdf1c90090b8ab23c2bc24f834aa2247f5f" dependencies = [ "log", - "ring", + "ring 0.16.20", "rustls-webpki", "sct", ] @@ -9067,7 +9316,7 @@ version = "0.100.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" dependencies = [ - "ring", + "ring 0.16.20", "untrusted", ] @@ -9115,7 +9364,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "log", "sp-core", @@ -9126,7 +9375,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "futures", @@ -9154,7 +9403,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "futures", "futures-timer", @@ -9177,7 +9426,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -9192,7 +9441,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "memmap2", "sc-chain-spec-derive", @@ -9211,7 +9460,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -9222,7 +9471,7 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "array-bytes", "chrono", @@ -9261,7 +9510,7 @@ dependencies = [ [[package]] name = "sc-client-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "fnv", "futures", @@ -9287,7 +9536,7 @@ 
dependencies = [ [[package]] name = "sc-client-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "hash-db", "kvdb", @@ -9313,7 +9562,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "futures", @@ -9338,7 +9587,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "fork-tree", @@ -9374,7 +9623,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "futures", "jsonrpsee", @@ -9396,7 +9645,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "array-bytes", "async-channel", @@ -9430,7 +9679,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy-rpc" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "futures", "jsonrpsee", @@ -9449,7 +9698,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "fork-tree", "parity-scale-codec", @@ -9462,7 +9711,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "ahash 0.8.2", "array-bytes", @@ -9503,7 +9752,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "finality-grandpa", "futures", @@ -9523,7 +9772,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "futures", @@ -9546,7 +9795,7 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", @@ -9568,7 +9817,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "sc-allocator", "sp-maybe-compressed-blob", @@ -9580,7 +9829,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "anyhow", "cfg-if", @@ -9597,7 +9846,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "ansi_term", "futures", @@ -9613,7 +9862,7 @@ dependencies = [ [[package]] name = "sc-keystore" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "array-bytes", "parking_lot 0.12.1", @@ -9627,7 +9876,7 @@ dependencies = [ [[package]] name = "sc-network" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "array-bytes", 
"async-channel", @@ -9670,7 +9919,7 @@ dependencies = [ [[package]] name = "sc-network-bitswap" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-channel", "cid", @@ -9690,7 +9939,7 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "bitflags", @@ -9707,7 +9956,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "ahash 0.8.2", "futures", @@ -9726,7 +9975,7 @@ dependencies = [ [[package]] name = "sc-network-light" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "array-bytes", "async-channel", @@ -9747,7 +9996,7 @@ dependencies = [ [[package]] name = "sc-network-sync" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "array-bytes", "async-channel", @@ -9781,7 +10030,7 @@ dependencies = [ [[package]] name = "sc-network-transactions" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "array-bytes", "futures", @@ -9799,7 +10048,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "array-bytes", "bytes", @@ -9833,7 +10082,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -9842,7 +10091,7 @@ dependencies = [ [[package]] name = "sc-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "futures", "jsonrpsee", @@ -9873,7 +10122,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -9892,7 +10141,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "http", "jsonrpsee", @@ -9907,7 +10156,7 @@ dependencies = [ [[package]] name = "sc-rpc-spec-v2" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "array-bytes", "futures", @@ -9920,6 +10169,7 @@ dependencies = [ "sc-chain-spec", "sc-client-api", "sc-transaction-pool-api", + "sc-utils", "serde", "sp-api", "sp-blockchain", @@ -9933,7 +10183,7 @@ dependencies = [ [[package]] name = "sc-service" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "directories", @@ -9997,7 +10247,7 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "log", "parity-scale-codec", @@ -10008,7 +10258,7 @@ dependencies = [ [[package]] name = "sc-storage-monitor" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "clap 4.2.5", "fs4", @@ -10022,7 +10272,7 @@ dependencies = [ [[package]] name = "sc-sync-state-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -10041,7 +10291,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "6.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "futures", "libc", @@ -10060,7 +10310,7 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "chrono", "futures", @@ -10079,7 +10329,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "ansi_term", "atty", @@ -10108,7 +10358,7 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -10119,7 +10369,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "futures", @@ -10145,7 +10395,7 @@ 
dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "futures", @@ -10161,7 +10411,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-channel", "futures", @@ -10230,7 +10480,7 @@ dependencies = [ "arrayvec 0.5.2", "curve25519-dalek 2.1.3", "getrandom 0.1.16", - "merlin", + "merlin 2.0.1", "rand 0.7.3", "rand_core 0.5.1", "sha2 0.8.2", @@ -10256,7 +10506,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ - "ring", + "ring 0.16.20", "untrusted", ] @@ -10520,9 +10770,9 @@ dependencies = [ [[package]] name = "sha3" -version = "0.10.0" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31f935e31cf406e8c0e96c2815a5516181b7004ae8c5f296293221e9b1e356bd" +checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" dependencies = [ "digest 0.10.7", "keccak", @@ -10664,7 +10914,7 @@ dependencies = [ "chacha20poly1305", "curve25519-dalek 4.0.0-rc.1", "rand_core 0.6.4", - "ring", + "ring 0.16.20", "rustc_version", "sha2 0.10.7", "subtle", @@ -10709,7 +10959,7 @@ dependencies = [ [[package]] name = "sp-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "hash-db", "log", @@ -10730,7 +10980,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "Inflector", "blake2", @@ -10744,7 +10994,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" version = "23.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", "scale-info", @@ -10757,7 +11007,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "16.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "integer-sqrt", "num-traits", @@ -10771,7 +11021,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", "scale-info", @@ -10784,7 +11034,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "sp-api", "sp-inherents", @@ 
-10795,7 +11045,7 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "futures", "log", @@ -10813,7 +11063,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "futures", @@ -10828,7 +11078,7 @@ dependencies = [ [[package]] name = "sp-consensus-aura" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "parity-scale-codec", @@ -10845,7 +11095,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "parity-scale-codec", @@ -10864,7 +11114,7 @@ dependencies = [ [[package]] name = "sp-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "lazy_static", "parity-scale-codec", @@ -10883,7 +11133,7 @@ dependencies = [ [[package]] name = "sp-consensus-grandpa" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "finality-grandpa", "log", @@ -10901,7 +11151,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", "scale-info", @@ -10913,9 +11163,11 @@ dependencies = [ [[package]] name = "sp-core" version = "21.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "array-bytes", + "arrayvec 0.7.4", + "bandersnatch_vrfs", "bitflags", "blake2", "bounded-collections", @@ -10929,7 +11181,7 @@ dependencies = [ "lazy_static", "libsecp256k1", "log", - "merlin", + "merlin 2.0.1", "parity-scale-codec", "parking_lot 0.12.1", "paste", @@ -10958,7 +11210,7 @@ dependencies = [ [[package]] name = "sp-core-hashing" version = "9.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "blake2b_simd", "byteorder", @@ -10971,7 +11223,7 @@ dependencies = [ [[package]] name = "sp-core-hashing-proc-macro" version = "9.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "quote", "sp-core-hashing", @@ -10981,7 +11233,7 @@ 
dependencies = [ [[package]] name = "sp-database" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "kvdb", "parking_lot 0.12.1", @@ -10990,7 +11242,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "proc-macro2", "quote", @@ -11000,7 +11252,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.19.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "environmental", "parity-scale-codec", @@ -11011,7 +11263,7 @@ dependencies = [ [[package]] name = "sp-genesis-builder" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "serde_json", "sp-api", @@ -11022,7 +11274,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -11036,7 +11288,7 @@ dependencies = [ [[package]] name = "sp-io" version = "23.0.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "bytes", "ed25519", @@ -11061,7 +11313,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "24.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "lazy_static", "sp-core", @@ -11072,7 +11324,7 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.27.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", @@ -11084,7 +11336,7 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "thiserror", "zstd 0.12.3+zstd.1.5.2", @@ -11093,7 +11345,7 @@ dependencies = [ [[package]] name = "sp-metadata-ir" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-metadata", "parity-scale-codec", @@ -11104,7 +11356,7 @@ dependencies = [ [[package]] name = "sp-mmr-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "ckb-merkle-mountain-range", "log", @@ -11122,7 +11374,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", "scale-info", @@ -11136,7 +11388,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "sp-api", "sp-core", @@ -11146,7 +11398,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "backtrace", "lazy_static", @@ -11156,7 +11408,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "rustc-hash", "serde", @@ -11166,7 +11418,7 @@ dependencies = [ [[package]] name = "sp-runtime" version = "24.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "either", "hash256-std-hasher", @@ -11188,7 +11440,7 @@ dependencies 
= [ [[package]] name = "sp-runtime-interface" version = "17.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -11206,7 +11458,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "11.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "Inflector", "proc-macro-crate", @@ -11218,7 +11470,7 @@ dependencies = [ [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", "scale-info", @@ -11233,7 +11485,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -11247,7 +11499,7 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.28.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "hash-db", "log", @@ -11268,7 +11520,7 @@ dependencies = [ [[package]] name = "sp-statement-store" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "aes-gcm 0.10.2", "curve25519-dalek 3.2.0", @@ -11292,12 +11544,12 @@ dependencies = [ [[package]] name = "sp-std" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" [[package]] name = "sp-storage" version = "13.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "impl-serde", "parity-scale-codec", @@ -11310,7 +11562,7 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "parity-scale-codec", @@ -11323,7 +11575,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "10.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", "sp-std", @@ -11335,7 +11587,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "sp-api", 
"sp-runtime", @@ -11344,7 +11596,7 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "parity-scale-codec", @@ -11359,7 +11611,7 @@ dependencies = [ [[package]] name = "sp-trie" version = "22.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "ahash 0.8.2", "hash-db", @@ -11382,7 +11634,7 @@ dependencies = [ [[package]] name = "sp-version" version = "22.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "impl-serde", "parity-scale-codec", @@ -11399,7 +11651,7 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", "proc-macro2", @@ -11410,7 +11662,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "14.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -11423,7 +11675,7 @@ dependencies = [ [[package]] name = "sp-weights" version = "20.0.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", "scale-info", @@ -11648,12 +11900,12 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" [[package]] name = "substrate-frame-rpc-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-system-rpc-runtime-api", "futures", @@ -11672,7 +11924,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "hyper", "log", @@ -11684,7 +11936,7 @@ dependencies = [ [[package]] name = "substrate-rpc-client" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "jsonrpsee", @@ -11697,7 +11949,7 @@ dependencies = [ [[package]] name = "substrate-state-trie-migration-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -11714,7 +11966,7 @@ dependencies = [ [[package]] name = "substrate-test-client" version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "array-bytes", "async-trait", @@ -11740,7 +11992,7 @@ dependencies = [ [[package]] name = "substrate-test-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "futures", "substrate-test-utils-derive", @@ -11750,7 +12002,7 @@ dependencies = [ [[package]] name = "substrate-test-utils-derive" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -11761,7 +12013,7 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "ansi_term", "build-helper", @@ -12638,7 +12890,7 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "try-runtime-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "clap 4.2.5", @@ -13315,7 +13567,7 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" dependencies = [ - "ring", + "ring 0.16.20", "untrusted", ] From c7bbfbac94aece90e6a9116d816234849ac0d31c Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Thu, 10 Aug 2023 15:29:43 +1000 Subject: [PATCH 21/35] Remove unused code in runtime/polkadot/src/lib.rs (#7540) * remove SetStorageVersions runtime upgrade * remove unused imports --- runtime/polkadot/src/lib.rs | 23 ----------------------- 1 file changed, 23 deletions(-) diff --git a/runtime/polkadot/src/lib.rs b/runtime/polkadot/src/lib.rs index 585e48dd5a4b..ac031671a4e6 100644 --- a/runtime/polkadot/src/lib.rs +++ b/runtime/polkadot/src/lib.rs @@ -1488,35 +1488,12 @@ pub type Migrations = migrations::Unreleased; #[allow(deprecated, missing_docs)] pub mod migrations { use super::*; - use frame_support::traits::{GetStorageVersion, OnRuntimeUpgrade, StorageVersion}; /// Unreleased migrations. Add new ones here: pub type Unreleased = ( pallet_im_online::migration::v1::Migration, parachains_configuration::migration::v7::MigrateToV7, ); - - /// Migrations that set `StorageVersion`s we missed to set. - pub struct SetStorageVersions; - - impl OnRuntimeUpgrade for SetStorageVersions { - fn on_runtime_upgrade() -> Weight { - // `Referenda` pallet was added on chain after the migration to version `1` was added. - // Thus, it never required the migration and we just missed to set the correct `StorageVersion`. - let storage_version = Referenda::on_chain_storage_version(); - if storage_version < 1 { - StorageVersion::new(1).put::(); - } - - // Was missed as part of: `runtime_common::session::migration::ClearOldSessionStorage`. 
- let storage_version = Historical::on_chain_storage_version(); - if storage_version < 1 { - StorageVersion::new(1).put::(); - } - - RocksDbWeight::get().reads_writes(2, 2) - } - } } /// Unchecked extrinsic type as expected by this runtime. From 08f4333df9abbb6c9de5f0c7f93b2038b7b8d399 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gon=C3=A7alo=20Pestana?= Date: Thu, 10 Aug 2023 10:42:43 +0200 Subject: [PATCH 22/35] Companion for substrate#12970 (#6807) * Runtime companion changes * updates runtime configs * Fixes runtime-test runtime configs * Uses ElectionBounds and builder from own mod * updates new bounds mod * Fixes test-runtime mock * update lockfile for {"substrate"} --------- Co-authored-by: parity-processbot <> --- Cargo.lock | 368 ++++++++++++++++---------------- runtime/kusama/src/lib.rs | 20 +- runtime/polkadot/src/lib.rs | 21 +- runtime/test-runtime/src/lib.rs | 17 +- runtime/westend/src/lib.rs | 19 +- 5 files changed, 225 insertions(+), 220 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 69571875070f..8fcd122e701a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -617,7 +617,7 @@ dependencies = [ [[package]] name = "binary-merkle-tree" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "hash-db", "log", @@ -2446,7 +2446,7 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", ] @@ -2469,7 +2469,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frame-benchmarking" 
version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-support", "frame-support-procedural", @@ -2494,7 +2494,7 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "Inflector", "array-bytes", @@ -2542,7 +2542,7 @@ dependencies = [ [[package]] name = "frame-election-provider-solution-type" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2553,7 +2553,7 @@ dependencies = [ [[package]] name = "frame-election-provider-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-election-provider-solution-type", "frame-support", @@ -2570,7 +2570,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-support", "frame-system", @@ -2599,7 +2599,7 @@ dependencies = [ [[package]] name = "frame-remote-externalities" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-recursion", "futures", @@ -2620,7 +2620,7 @@ dependencies = [ [[package]] name = "frame-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "aquamarine", "bitflags", @@ -2657,7 +2657,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "Inflector", "cfg-expr", @@ -2675,7 +2675,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", @@ -2687,7 +2687,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "proc-macro2", "quote", @@ -2697,7 +2697,7 @@ dependencies = [ [[package]] name = "frame-support-test" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source 
= "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-executive", @@ -2724,7 +2724,7 @@ dependencies = [ [[package]] name = "frame-support-test-pallet" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-support", "frame-system", @@ -2737,7 +2737,7 @@ dependencies = [ [[package]] name = "frame-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "cfg-if", "frame-support", @@ -2756,7 +2756,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -2771,7 +2771,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", "sp-api", @@ -2780,7 +2780,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" 
dependencies = [ "frame-support", "parity-scale-codec", @@ -2962,7 +2962,7 @@ dependencies = [ [[package]] name = "generate-bags" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "chrono", "frame-election-provider-support", @@ -4829,7 +4829,7 @@ dependencies = [ [[package]] name = "mmr-gadget" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "futures", "log", @@ -4848,7 +4848,7 @@ dependencies = [ [[package]] name = "mmr-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "anyhow", "jsonrpsee", @@ -5374,7 +5374,7 @@ dependencies = [ [[package]] name = "pallet-assets" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5389,7 +5389,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-support", "frame-system", @@ -5405,7 +5405,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "4.0.0-dev" 
-source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-support", "frame-system", @@ -5419,7 +5419,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5443,7 +5443,7 @@ dependencies = [ [[package]] name = "pallet-bags-list" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5463,7 +5463,7 @@ dependencies = [ [[package]] name = "pallet-bags-list-remote-tests" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-election-provider-support", "frame-remote-externalities", @@ -5482,7 +5482,7 @@ dependencies = [ [[package]] name = "pallet-balances" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5497,7 +5497,7 @@ dependencies = [ [[package]] name = "pallet-beefy" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-support", "frame-system", @@ -5516,7 +5516,7 @@ dependencies = [ [[package]] name = "pallet-beefy-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "array-bytes", "binary-merkle-tree", @@ -5540,7 +5540,7 @@ dependencies = [ [[package]] name = "pallet-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5558,7 +5558,7 @@ dependencies = [ [[package]] name = "pallet-child-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5577,7 +5577,7 @@ dependencies = [ [[package]] name = "pallet-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5594,7 +5594,7 @@ dependencies = [ [[package]] name = "pallet-conviction-voting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source 
= "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "assert_matches", "frame-benchmarking", @@ -5611,7 +5611,7 @@ dependencies = [ [[package]] name = "pallet-democracy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5629,7 +5629,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-multi-phase" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5652,7 +5652,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-support-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5665,7 +5665,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5684,7 +5684,7 @@ dependencies = [ [[package]] name = "pallet-fast-unstake" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "docify", "frame-benchmarking", @@ -5703,7 +5703,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5726,7 +5726,7 @@ dependencies = [ [[package]] name = "pallet-identity" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "enumflags2", "frame-benchmarking", @@ -5742,7 +5742,7 @@ dependencies = [ [[package]] name = "pallet-im-online" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5762,7 +5762,7 @@ dependencies = [ [[package]] name = "pallet-indices" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5779,7 +5779,7 @@ dependencies = [ [[package]] name = "pallet-membership" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ 
"frame-benchmarking", "frame-support", @@ -5796,7 +5796,7 @@ dependencies = [ [[package]] name = "pallet-message-queue" version = "7.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5815,7 +5815,7 @@ dependencies = [ [[package]] name = "pallet-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5832,7 +5832,7 @@ dependencies = [ [[package]] name = "pallet-multisig" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5848,7 +5848,7 @@ dependencies = [ [[package]] name = "pallet-nis" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5864,7 +5864,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-support", "frame-system", @@ -5883,7 +5883,7 @@ dependencies = [ [[package]] name = 
"pallet-nomination-pools-benchmarking" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5903,7 +5903,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-runtime-api" version = "1.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "pallet-nomination-pools", "parity-scale-codec", @@ -5914,7 +5914,7 @@ dependencies = [ [[package]] name = "pallet-offences" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-support", "frame-system", @@ -5931,7 +5931,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5955,7 +5955,7 @@ dependencies = [ [[package]] name = "pallet-preimage" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5972,7 +5972,7 @@ dependencies = [ [[package]] name = "pallet-proxy" version = 
"4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5987,7 +5987,7 @@ dependencies = [ [[package]] name = "pallet-ranked-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -6005,7 +6005,7 @@ dependencies = [ [[package]] name = "pallet-recovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -6020,7 +6020,7 @@ dependencies = [ [[package]] name = "pallet-referenda" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "assert_matches", "frame-benchmarking", @@ -6039,7 +6039,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -6056,7 +6056,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-support", "frame-system", @@ -6077,7 +6077,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -6093,7 +6093,7 @@ dependencies = [ [[package]] name = "pallet-society" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -6111,7 +6111,7 @@ dependencies = [ [[package]] name = "pallet-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -6134,7 +6134,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -6145,7 +6145,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-fn" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "log", "sp-arithmetic", @@ -6154,7 +6154,7 @@ dependencies = [ [[package]] name = "pallet-staking-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", "sp-api", @@ -6163,7 +6163,7 @@ dependencies = [ [[package]] name = "pallet-state-trie-migration" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -6180,7 +6180,7 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -6195,7 +6195,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -6213,7 +6213,7 @@ dependencies = [ [[package]] name = "pallet-tips" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -6232,7 +6232,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-support", "frame-system", @@ -6248,7 +6248,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", @@ -6264,7 +6264,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -6276,7 +6276,7 @@ dependencies = [ [[package]] name = "pallet-treasury" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -6293,7 +6293,7 @@ dependencies = [ [[package]] name = "pallet-uniques" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -6308,7 +6308,7 @@ dependencies = [ [[package]] name = "pallet-utility" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -6324,7 +6324,7 @@ dependencies = [ [[package]] name = "pallet-vesting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -6339,7 +6339,7 @@ dependencies = [ [[package]] name = "pallet-whitelist" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -9364,7 +9364,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "log", "sp-core", @@ -9375,7 +9375,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", 
"futures", @@ -9403,7 +9403,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "futures", "futures-timer", @@ -9426,7 +9426,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -9441,7 +9441,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "memmap2", "sc-chain-spec-derive", @@ -9460,7 +9460,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -9471,7 +9471,7 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "array-bytes", "chrono", @@ -9510,7 +9510,7 @@ dependencies = [ [[package]] name = "sc-client-api" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "fnv", "futures", @@ -9536,7 +9536,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "hash-db", "kvdb", @@ -9562,7 +9562,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "futures", @@ -9587,7 +9587,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "fork-tree", @@ -9623,7 +9623,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "futures", "jsonrpsee", @@ -9645,7 +9645,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "array-bytes", "async-channel", @@ -9679,7 +9679,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "futures", "jsonrpsee", @@ -9698,7 +9698,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "fork-tree", "parity-scale-codec", @@ -9711,7 +9711,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "ahash 0.8.2", "array-bytes", @@ -9752,7 +9752,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "finality-grandpa", "futures", @@ -9772,7 +9772,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ 
"async-trait", "futures", @@ -9795,7 +9795,7 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", @@ -9817,7 +9817,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "sc-allocator", "sp-maybe-compressed-blob", @@ -9829,7 +9829,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "anyhow", "cfg-if", @@ -9846,7 +9846,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "ansi_term", "futures", @@ -9862,7 +9862,7 @@ dependencies = [ [[package]] name = "sc-keystore" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "array-bytes", "parking_lot 0.12.1", @@ -9876,7 +9876,7 @@ dependencies = [ [[package]] name = "sc-network" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "array-bytes", "async-channel", @@ -9919,7 +9919,7 @@ dependencies = [ [[package]] name = "sc-network-bitswap" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-channel", "cid", @@ -9939,7 +9939,7 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "bitflags", @@ -9956,7 +9956,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "ahash 0.8.2", "futures", @@ -9975,7 +9975,7 @@ dependencies = [ [[package]] name = "sc-network-light" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "array-bytes", "async-channel", @@ -9996,7 +9996,7 @@ dependencies = [ [[package]] name = "sc-network-sync" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "array-bytes", "async-channel", @@ -10030,7 +10030,7 @@ dependencies = [ [[package]] name = "sc-network-transactions" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "array-bytes", "futures", @@ -10048,7 +10048,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "array-bytes", "bytes", @@ -10082,7 +10082,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -10091,7 +10091,7 @@ dependencies = [ [[package]] name = "sc-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "futures", "jsonrpsee", @@ -10122,7 +10122,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ 
-10141,7 +10141,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "http", "jsonrpsee", @@ -10156,7 +10156,7 @@ dependencies = [ [[package]] name = "sc-rpc-spec-v2" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "array-bytes", "futures", @@ -10183,7 +10183,7 @@ dependencies = [ [[package]] name = "sc-service" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "directories", @@ -10247,7 +10247,7 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "log", "parity-scale-codec", @@ -10258,7 +10258,7 @@ dependencies = [ [[package]] name = "sc-storage-monitor" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "clap 4.2.5", "fs4", @@ -10272,7 +10272,7 @@ dependencies = [ [[package]] name = "sc-sync-state-rpc" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -10291,7 +10291,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "6.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "futures", "libc", @@ -10310,7 +10310,7 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "chrono", "futures", @@ -10329,7 +10329,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "ansi_term", "atty", @@ -10358,7 +10358,7 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -10369,7 +10369,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "futures", @@ -10395,7 +10395,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "futures", @@ -10411,7 +10411,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-channel", "futures", @@ -10959,7 +10959,7 @@ dependencies = [ [[package]] name = "sp-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "hash-db", "log", @@ -10980,7 +10980,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "Inflector", "blake2", @@ -10994,7 +10994,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" version = "23.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", "scale-info", @@ -11007,7 +11007,7 @@ 
dependencies = [ [[package]] name = "sp-arithmetic" version = "16.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "integer-sqrt", "num-traits", @@ -11021,7 +11021,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", "scale-info", @@ -11034,7 +11034,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "sp-api", "sp-inherents", @@ -11045,7 +11045,7 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "futures", "log", @@ -11063,7 +11063,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "futures", @@ -11078,7 +11078,7 @@ dependencies = [ [[package]] name = "sp-consensus-aura" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "parity-scale-codec", @@ -11095,7 +11095,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "parity-scale-codec", @@ -11114,7 +11114,7 @@ dependencies = [ [[package]] name = "sp-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "lazy_static", "parity-scale-codec", @@ -11133,7 +11133,7 @@ dependencies = [ [[package]] name = "sp-consensus-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "finality-grandpa", "log", @@ -11151,7 +11151,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", "scale-info", @@ -11163,7 +11163,7 @@ dependencies = [ [[package]] name = "sp-core" version = "21.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "array-bytes", "arrayvec 0.7.4", @@ -11210,7 +11210,7 @@ dependencies = [ [[package]] name = "sp-core-hashing" version = "9.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "blake2b_simd", "byteorder", @@ -11223,7 +11223,7 @@ dependencies = [ [[package]] name = "sp-core-hashing-proc-macro" version = "9.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "quote", "sp-core-hashing", @@ -11233,7 +11233,7 @@ dependencies = [ [[package]] name = "sp-database" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "kvdb", "parking_lot 0.12.1", @@ -11242,7 +11242,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "proc-macro2", "quote", @@ -11252,7 +11252,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.19.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "environmental", "parity-scale-codec", @@ 
-11263,7 +11263,7 @@ dependencies = [ [[package]] name = "sp-genesis-builder" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "serde_json", "sp-api", @@ -11274,7 +11274,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -11288,7 +11288,7 @@ dependencies = [ [[package]] name = "sp-io" version = "23.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "bytes", "ed25519", @@ -11313,7 +11313,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "24.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "lazy_static", "sp-core", @@ -11324,7 +11324,7 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.27.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", @@ -11336,7 +11336,7 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "4.1.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "thiserror", "zstd 0.12.3+zstd.1.5.2", @@ -11345,7 +11345,7 @@ dependencies = [ [[package]] name = "sp-metadata-ir" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-metadata", "parity-scale-codec", @@ -11356,7 +11356,7 @@ dependencies = [ [[package]] name = "sp-mmr-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "ckb-merkle-mountain-range", "log", @@ -11374,7 +11374,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", "scale-info", @@ -11388,7 +11388,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "sp-api", "sp-core", @@ -11398,7 +11398,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "backtrace", "lazy_static", @@ -11408,7 +11408,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "rustc-hash", "serde", @@ -11418,7 +11418,7 @@ dependencies = [ [[package]] name = "sp-runtime" version = "24.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "either", "hash256-std-hasher", @@ -11440,7 +11440,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "17.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -11458,7 +11458,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "11.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "Inflector", "proc-macro-crate", @@ -11470,7 +11470,7 @@ dependencies = [ [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", "scale-info", @@ -11485,7 
+11485,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -11499,7 +11499,7 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.28.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "hash-db", "log", @@ -11520,7 +11520,7 @@ dependencies = [ [[package]] name = "sp-statement-store" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "aes-gcm 0.10.2", "curve25519-dalek 3.2.0", @@ -11544,12 +11544,12 @@ dependencies = [ [[package]] name = "sp-std" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" [[package]] name = "sp-storage" version = "13.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "impl-serde", "parity-scale-codec", @@ -11562,7 +11562,7 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "parity-scale-codec", @@ -11575,7 +11575,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "10.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", "sp-std", @@ -11587,7 +11587,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "sp-api", "sp-runtime", @@ -11596,7 +11596,7 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "parity-scale-codec", @@ -11611,7 +11611,7 @@ dependencies = [ [[package]] name = "sp-trie" version = "22.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "ahash 0.8.2", "hash-db", @@ -11634,7 +11634,7 @@ dependencies = [ [[package]] name = "sp-version" version = "22.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "impl-serde", "parity-scale-codec", @@ 
-11651,7 +11651,7 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", "proc-macro2", @@ -11662,7 +11662,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "14.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -11675,7 +11675,7 @@ dependencies = [ [[package]] name = "sp-weights" version = "20.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", "scale-info", @@ -11900,12 +11900,12 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" [[package]] name = "substrate-frame-rpc-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-system-rpc-runtime-api", "futures", @@ -11924,7 +11924,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "hyper", "log", @@ -11936,7 +11936,7 @@ dependencies = [ [[package]] name = "substrate-rpc-client" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "jsonrpsee", @@ -11949,7 +11949,7 @@ dependencies = [ [[package]] name = "substrate-state-trie-migration-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -11966,7 +11966,7 @@ dependencies = [ [[package]] name = "substrate-test-client" version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "array-bytes", "async-trait", @@ -11992,7 +11992,7 @@ dependencies = [ [[package]] name = "substrate-test-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "futures", "substrate-test-utils-derive", @@ -12002,7 +12002,7 @@ dependencies = [ [[package]] name = "substrate-test-utils-derive" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source 
= "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -12013,7 +12013,7 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "ansi_term", "build-helper", @@ -12890,7 +12890,7 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "try-runtime-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "clap 4.2.5", diff --git a/runtime/kusama/src/lib.rs b/runtime/kusama/src/lib.rs index 335ef79fab58..0248b02e12f6 100644 --- a/runtime/kusama/src/lib.rs +++ b/runtime/kusama/src/lib.rs @@ -53,7 +53,8 @@ use runtime_parachains::{ use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; use beefy_primitives::ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}; use frame_election_provider_support::{ - generate_solution_type, onchain, NposSolution, SequentialPhragmen, + bounds::ElectionBoundsBuilder, generate_solution_type, onchain, NposSolution, + SequentialPhragmen, }; use frame_support::{ construct_runtime, parameter_types, @@ -399,11 +400,12 @@ parameter_types! { // 1 hour session, 15 minutes unsigned phase, 8 offchain executions. pub OffchainRepeat: BlockNumber = UnsignedPhase::get() / 8; - /// We take the top 12500 nominators as electing voters.. pub const MaxElectingVoters: u32 = 12_500; - /// ... and all of the validators as electable targets. 
Whilst this is the case, we cannot and - /// shall not increase the size of the validator intentions. - pub const MaxElectableTargets: u16 = u16::MAX; + /// We take the top 12500 nominators as electing voters and all of the validators as electable + /// targets. Whilst this is the case, we cannot and shall not increase the size of the + /// validator intentions. + pub ElectionBounds: frame_election_provider_support::bounds::ElectionBounds = + ElectionBoundsBuilder::default().voters_count(MaxElectingVoters::get().into()).build(); pub NposSolutionPriority: TransactionPriority = Perbill::from_percent(90) * TransactionPriority::max_value(); /// Setup election pallet to support maximum winners upto 2000. This will mean Staking Pallet @@ -428,8 +430,7 @@ impl onchain::Config for OnChainSeqPhragmen { type DataProvider = Staking; type WeightInfo = weights::frame_election_provider_support::WeightInfo; type MaxWinners = MaxActiveValidators; - type VotersBound = MaxElectingVoters; - type TargetsBound = MaxElectableTargets; + type Bounds = ElectionBounds; } impl pallet_election_provider_multi_phase::MinerConfig for Runtime { @@ -495,9 +496,8 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type BenchmarkingConfig = runtime_common::elections::BenchmarkConfig; type ForceOrigin = EitherOf, StakingAdmin>; type WeightInfo = weights::pallet_election_provider_multi_phase::WeightInfo; - type MaxElectingVoters = MaxElectingVoters; - type MaxElectableTargets = MaxElectableTargets; type MaxWinners = MaxActiveValidators; + type ElectionBounds = ElectionBounds; } parameter_types! { @@ -564,7 +564,6 @@ parameter_types! 
{ } impl pallet_staking::Config for Runtime { - type MaxNominations = MaxNominations; type Currency = Balances; type CurrencyBalance = Balance; type UnixTime = Timestamp; @@ -586,6 +585,7 @@ impl pallet_staking::Config for Runtime { type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type VoterList = VoterList; type TargetList = UseValidatorsMap; + type NominationsQuota = pallet_staking::FixedNominationsQuota<{ MaxNominations::get() }>; type MaxUnlockingChunks = frame_support::traits::ConstU32<32>; type HistoryDepth = frame_support::traits::ConstU32<84>; type BenchmarkingConfig = runtime_common::StakingBenchmarkingConfig; diff --git a/runtime/polkadot/src/lib.rs b/runtime/polkadot/src/lib.rs index ac031671a4e6..fbf896cdedc5 100644 --- a/runtime/polkadot/src/lib.rs +++ b/runtime/polkadot/src/lib.rs @@ -40,7 +40,9 @@ use runtime_parachains::{ use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; use beefy_primitives::ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}; -use frame_election_provider_support::{generate_solution_type, onchain, SequentialPhragmen}; +use frame_election_provider_support::{ + bounds::ElectionBoundsBuilder, generate_solution_type, onchain, SequentialPhragmen, +}; use frame_support::{ construct_runtime, parameter_types, traits::{ @@ -393,11 +395,12 @@ parameter_types! { // 4 hour session, 1 hour unsigned phase, 32 offchain executions. pub OffchainRepeat: BlockNumber = UnsignedPhase::get() / 32; - /// We take the top 22500 nominators as electing voters.. pub const MaxElectingVoters: u32 = 22_500; - /// ... and all of the validators as electable targets. Whilst this is the case, we cannot and - /// shall not increase the size of the validator intentions. - pub const MaxElectableTargets: u16 = u16::MAX; + /// We take the top 22500 nominators as electing voters and all of the validators as electable + /// targets. 
Whilst this is the case, we cannot and shall not increase the size of the + /// validator intentions. + pub ElectionBounds: frame_election_provider_support::bounds::ElectionBounds = + ElectionBoundsBuilder::default().voters_count(MaxElectingVoters::get().into()).build(); /// Setup election pallet to support maximum winners upto 1200. This will mean Staking Pallet /// cannot have active validators higher than this count. pub const MaxActiveValidators: u32 = 1200; @@ -420,8 +423,7 @@ impl onchain::Config for OnChainSeqPhragmen { type DataProvider = Staking; type WeightInfo = weights::frame_election_provider_support::WeightInfo; type MaxWinners = MaxActiveValidators; - type VotersBound = MaxElectingVoters; - type TargetsBound = MaxElectableTargets; + type Bounds = ElectionBounds; } impl pallet_election_provider_multi_phase::MinerConfig for Runtime { @@ -487,9 +489,8 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type BenchmarkingConfig = runtime_common::elections::BenchmarkConfig; type ForceOrigin = EitherOf, StakingAdmin>; type WeightInfo = weights::pallet_election_provider_multi_phase::WeightInfo; - type MaxElectingVoters = MaxElectingVoters; - type MaxElectableTargets = MaxElectableTargets; type MaxWinners = MaxActiveValidators; + type ElectionBounds = ElectionBounds; } parameter_types! 
{ @@ -572,7 +573,6 @@ impl pallet_staking::EraPayout for EraPayout { } impl pallet_staking::Config for Runtime { - type MaxNominations = MaxNominations; type Currency = Balances; type CurrencyBalance = Balance; type UnixTime = Timestamp; @@ -594,6 +594,7 @@ impl pallet_staking::Config for Runtime { type GenesisElectionProvider = onchain::OnChainExecution; type VoterList = VoterList; type TargetList = UseValidatorsMap; + type NominationsQuota = pallet_staking::FixedNominationsQuota<{ MaxNominations::get() }>; type MaxUnlockingChunks = frame_support::traits::ConstU32<32>; type HistoryDepth = frame_support::traits::ConstU32<84>; type BenchmarkingConfig = runtime_common::StakingBenchmarkingConfig; diff --git a/runtime/test-runtime/src/lib.rs b/runtime/test-runtime/src/lib.rs index 9e2f2a66455b..c9f3aa6cb203 100644 --- a/runtime/test-runtime/src/lib.rs +++ b/runtime/test-runtime/src/lib.rs @@ -36,7 +36,10 @@ use polkadot_runtime_parachains::{ use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; use beefy_primitives::ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}; -use frame_election_provider_support::{onchain, SequentialPhragmen}; +use frame_election_provider_support::{ + bounds::{ElectionBounds, ElectionBoundsBuilder}, + onchain, SequentialPhragmen, +}; use frame_support::{ construct_runtime, parameter_types, traits::{Everything, KeyOwnerProofSystem, WithdrawReasons}, @@ -315,8 +318,8 @@ parameter_types! { pub storage OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); pub const MaxAuthorities: u32 = 100_000; pub const OnChainMaxWinners: u32 = u32::MAX; - pub const MaxElectingVoters: u32 = u32::MAX; - pub const MaxElectableTargets: u16 = u16::MAX; + // Unbounded number of election targets and voters. 
+ pub ElectionBoundsOnChain: ElectionBounds = ElectionBoundsBuilder::default().build(); } pub struct OnChainSeqPhragmen; @@ -325,13 +328,14 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); + type Bounds = ElectionBoundsOnChain; type MaxWinners = OnChainMaxWinners; - type VotersBound = MaxElectingVoters; - type TargetsBound = MaxElectableTargets; } +/// Upper limit on the number of NPOS nominations. +const MAX_QUOTA_NOMINATIONS: u32 = 16; + impl pallet_staking::Config for Runtime { - type MaxNominations = frame_support::pallet_prelude::ConstU32<16>; type Currency = Balances; type CurrencyBalance = Balance; type UnixTime = Timestamp; @@ -355,6 +359,7 @@ impl pallet_staking::Config for Runtime { // to bags-list is a no-op, but the storage version will be updated. type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; + type NominationsQuota = pallet_staking::FixedNominationsQuota; type MaxUnlockingChunks = frame_support::traits::ConstU32<32>; type HistoryDepth = frame_support::traits::ConstU32<84>; type BenchmarkingConfig = runtime_common::StakingBenchmarkingConfig; diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs index dd4bcff32e39..4b4659442cff 100644 --- a/runtime/westend/src/lib.rs +++ b/runtime/westend/src/lib.rs @@ -22,7 +22,7 @@ use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; use beefy_primitives::ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}; -use frame_election_provider_support::{onchain, SequentialPhragmen}; +use frame_election_provider_support::{bounds::ElectionBoundsBuilder, onchain, SequentialPhragmen}; use frame_support::{ construct_runtime, parameter_types, traits::{ @@ -371,11 +371,12 @@ parameter_types! { // 1 hour session, 15 minutes unsigned phase, 4 offchain executions. 
pub OffchainRepeat: BlockNumber = UnsignedPhase::get() / 4; - /// We take the top 22500 nominators as electing voters.. pub const MaxElectingVoters: u32 = 22_500; - /// ... and all of the validators as electable targets. Whilst this is the case, we cannot and - /// shall not increase the size of the validator intentions. - pub const MaxElectableTargets: u16 = u16::MAX; + /// We take the top 22500 nominators as electing voters and all of the validators as electable + /// targets. Whilst this is the case, we cannot and shall not increase the size of the + /// validator intentions. + pub ElectionBounds: frame_election_provider_support::bounds::ElectionBounds = + ElectionBoundsBuilder::default().voters_count(MaxElectingVoters::get().into()).build(); // Maximum winners that can be chosen as active validators pub const MaxActiveValidators: u32 = 1000; @@ -398,8 +399,7 @@ impl onchain::Config for OnChainSeqPhragmen { type DataProvider = Staking; type WeightInfo = weights::frame_election_provider_support::WeightInfo; type MaxWinners = MaxActiveValidators; - type VotersBound = MaxElectingVoters; - type TargetsBound = MaxElectableTargets; + type Bounds = ElectionBounds; } impl pallet_election_provider_multi_phase::MinerConfig for Runtime { @@ -465,9 +465,8 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type BenchmarkingConfig = runtime_common::elections::BenchmarkConfig; type ForceOrigin = EnsureRoot; type WeightInfo = weights::pallet_election_provider_multi_phase::WeightInfo; - type MaxElectingVoters = MaxElectingVoters; - type MaxElectableTargets = MaxElectableTargets; type MaxWinners = MaxActiveValidators; + type ElectionBounds = ElectionBounds; } parameter_types! { @@ -508,7 +507,6 @@ parameter_types! 
{ } impl pallet_staking::Config for Runtime { - type MaxNominations = MaxNominations; type Currency = Balances; type CurrencyBalance = Balance; type UnixTime = Timestamp; @@ -530,6 +528,7 @@ impl pallet_staking::Config for Runtime { type GenesisElectionProvider = onchain::OnChainExecution; type VoterList = VoterList; type TargetList = UseValidatorsMap; + type NominationsQuota = pallet_staking::FixedNominationsQuota<{ MaxNominations::get() }>; type MaxUnlockingChunks = frame_support::traits::ConstU32<32>; type HistoryDepth = frame_support::traits::ConstU32<84>; type BenchmarkingConfig = runtime_common::StakingBenchmarkingConfig; From 0f27b6c2f9efaa0298b45ad94e767ffdb94a7e12 Mon Sep 17 00:00:00 2001 From: Andrei Eres Date: Thu, 10 Aug 2023 13:17:24 +0200 Subject: [PATCH 23/35] Add counter for unapproved candidates (#7491) * Add counter for unapproved candidates * Update metrics * Split metrics * Remove depth metric * Print only the oldest unapproved candidates * Update logging condition * Fix logging condition * Update logging * Update node/core/approval-voting/src/lib.rs Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com> --------- Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com> --- node/core/approval-voting/src/lib.rs | 29 ++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index a6a74da50480..05b92f459529 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -160,6 +160,7 @@ struct MetricsInner { time_db_transaction: prometheus::Histogram, time_recover_and_approve: prometheus::Histogram, candidate_signatures_requests_total: prometheus::Counter, + unapproved_candidates_in_unfinalized_chain: prometheus::Gauge, } /// Approval Voting metrics. 
@@ -246,6 +247,12 @@ impl Metrics { fn time_recover_and_approve(&self) -> Option { self.0.as_ref().map(|metrics| metrics.time_recover_and_approve.start_timer()) } + + fn on_unapproved_candidates_in_unfinalized_chain(&self, count: usize) { + if let Some(metrics) = &self.0 { + metrics.unapproved_candidates_in_unfinalized_chain.set(count as u64); + } + } } impl metrics::Metrics for Metrics { @@ -336,6 +343,13 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + unapproved_candidates_in_unfinalized_chain: prometheus::register( + prometheus::Gauge::new( + "polkadot_parachain_approval_unapproved_candidates_in_unfinalized_chain", + "Number of unapproved candidates in unfinalized chain", + )?, + registry, + )?, }; Ok(Metrics(Some(metrics))) @@ -1298,6 +1312,7 @@ async fn handle_from_overseer( lower_bound, wakeups, &mut approved_ancestor_span, + &metrics, ) .await { @@ -1423,9 +1438,11 @@ async fn handle_approved_ancestor( lower_bound: BlockNumber, wakeups: &Wakeups, span: &mut jaeger::Span, + metrics: &Metrics, ) -> SubsystemResult> { const MAX_TRACING_WINDOW: usize = 200; const ABNORMAL_DEPTH_THRESHOLD: usize = 5; + const LOGGING_DEPTH_THRESHOLD: usize = 10; let mut span = span .child("handle-approved-ancestor") .with_stage(jaeger::Stage::ApprovalChecking); @@ -1471,6 +1488,7 @@ async fn handle_approved_ancestor( } else { Vec::new() }; + let ancestry_len = ancestry.len(); let mut block_descriptions = Vec::new(); @@ -1534,6 +1552,17 @@ async fn handle_approved_ancestor( unapproved.len(), entry.candidates().len(), ); + if ancestry_len >= LOGGING_DEPTH_THRESHOLD && i > ancestry_len - LOGGING_DEPTH_THRESHOLD + { + gum::trace!( + target: LOG_TARGET, + ?block_hash, + "Unapproved candidates at depth {}: {:?}", + bits.len(), + unapproved + ) + } + metrics.on_unapproved_candidates_in_unfinalized_chain(unapproved.len()); entry_span.add_uint_tag("unapproved-candidates", unapproved.len() as u64); for candidate_hash in unapproved { match db.load_candidate_entry(&candidate_hash)? 
{ From e813323b74b57bf64b49b9fe96a231d900578263 Mon Sep 17 00:00:00 2001 From: Chevdor Date: Fri, 11 Aug 2023 15:28:39 +0200 Subject: [PATCH 24/35] Publish RC container images (#7556) * WIP * Add missing checkout * Add debuggin * Fix VAR name * Bug fix * Rework jobs * Revert "Rework jobs" This reverts commit 2bfa79fd3ae633c17403b838f9a5025f0f7fc3f3. * Add cache * Add temp default for testing * Add missing checkout * Fix patch * Comment out the GPG check for now * Rename polkadot_injected_release into a more appropriate polkadot_injected_debian * Refactoring / renaming * Introduce a generic image for binary injection * Flag files to be deleted and changes to be done * WIP * Fix multi binaries images * Add test build scripts * Remove old file, add polkadot build-injected script * Fix doc * Fix tagging * Add build of the injected container * Fix for docker * Remove the need for TTY * Handling container publishing * Fix owner and registry * Fix vars * Fix repo * Fix var naming * Fix case when there is no tag * Fix case with no tag * Handle error * Fix spacings * Fix tags * Remove unnecessary grep that may fail * Add final check * Clean up and introduce GPG check * Add doc * Add doc * Update doc/docker.md Co-authored-by: Mira Ressel * type Co-authored-by: Mira Ressel * Fix used VAR * Improve doc * ci: Update .build-push-image jobs to use the new build-injected.sh * ci: fix path to build-injected.sh script * Rename the release artifacts folder to prevent confusion due to a similar folder in the gitlab CI * ci: check out polkadot repo in .build-push-image This seems far cleaner than copying the entire scripts/ folder into our job artifacts. * feat(build-injected.sh): make PROJECT_ROOT configurable This lets us avoid a dependency on git in our CI image. 
* ci: build injected images with buildah * ci: pass full image names to zombienet * Add missing ignore --------- Co-authored-by: Mira Ressel --- .github/workflows/check-licenses.yml | 2 +- .../workflows/release-40_publish-rc-image.yml | 132 ++++++++++++++++++ .../release-50_publish-docker-release.yml | 2 +- .../release-51_publish-docker-manual.yml | 2 +- .gitignore | 4 + .gitlab-ci.yml | 36 +++-- doc/docker.md | 86 +++++++----- scripts/ci/common/lib.sh | 70 ++++++++++ .../adder-collator/build-injected.sh | 13 ++ .../dockerfiles/adder-collator/test-build.sh | 23 +++ .../ci/dockerfiles/binary_injected.Dockerfile | 48 +++++++ scripts/ci/dockerfiles/build-injected.sh | 92 ++++++++++++ .../dockerfiles/collator_injected.Dockerfile | 49 ------- scripts/ci/dockerfiles/entrypoint.sh | 18 +++ .../ci/dockerfiles/malus/build-injected.sh | 14 ++ scripts/ci/dockerfiles/malus/test-build.sh | 19 +++ .../ci/dockerfiles/malus_injected.Dockerfile | 50 ------- scripts/ci/dockerfiles/polkadot/README.md | 2 + .../ci/dockerfiles/polkadot/build-injected.sh | 13 ++ scripts/ci/dockerfiles/polkadot/build.sh | 27 ---- .../dockerfiles/polkadot/docker-compose.yml | 13 +- .../polkadot_Dockerfile.README.md | 0 .../polkadot/polkadot_builder.Dockerfile | 2 +- .../polkadot_injected_debian.Dockerfile} | 2 +- scripts/ci/dockerfiles/polkadot/test-build.sh | 18 +++ .../polkadot_injected_debug.Dockerfile | 48 ------- .../ci/dockerfiles/staking-miner/README.md | 37 +++++ .../staking-miner/build-injected.sh | 13 ++ scripts/ci/dockerfiles/staking-miner/build.sh | 13 ++ .../staking-miner_builder.Dockerfile | 11 +- .../staking-miner_injected.Dockerfile | 43 ------ .../dockerfiles/staking-miner/test-build.sh | 18 +++ scripts/ci/gitlab/pipeline/build.yml | 4 - scripts/ci/gitlab/pipeline/publish.yml | 54 +++---- utils/staking-miner/README.md | 6 +- 35 files changed, 661 insertions(+), 323 deletions(-) create mode 100644 .github/workflows/release-40_publish-rc-image.yml create mode 100755 
scripts/ci/dockerfiles/adder-collator/build-injected.sh create mode 100755 scripts/ci/dockerfiles/adder-collator/test-build.sh create mode 100644 scripts/ci/dockerfiles/binary_injected.Dockerfile create mode 100755 scripts/ci/dockerfiles/build-injected.sh delete mode 100644 scripts/ci/dockerfiles/collator_injected.Dockerfile create mode 100755 scripts/ci/dockerfiles/entrypoint.sh create mode 100755 scripts/ci/dockerfiles/malus/build-injected.sh create mode 100755 scripts/ci/dockerfiles/malus/test-build.sh delete mode 100644 scripts/ci/dockerfiles/malus_injected.Dockerfile create mode 100755 scripts/ci/dockerfiles/polkadot/build-injected.sh delete mode 100755 scripts/ci/dockerfiles/polkadot/build.sh rename scripts/ci/dockerfiles/{ => polkadot}/polkadot_Dockerfile.README.md (100%) rename scripts/ci/dockerfiles/{polkadot_injected_release.Dockerfile => polkadot/polkadot_injected_debian.Dockerfile} (95%) create mode 100755 scripts/ci/dockerfiles/polkadot/test-build.sh delete mode 100644 scripts/ci/dockerfiles/polkadot_injected_debug.Dockerfile create mode 100644 scripts/ci/dockerfiles/staking-miner/README.md create mode 100755 scripts/ci/dockerfiles/staking-miner/build-injected.sh create mode 100755 scripts/ci/dockerfiles/staking-miner/build.sh delete mode 100644 scripts/ci/dockerfiles/staking-miner/staking-miner_injected.Dockerfile create mode 100755 scripts/ci/dockerfiles/staking-miner/test-build.sh diff --git a/.github/workflows/check-licenses.yml b/.github/workflows/check-licenses.yml index a4c8d5d97424..522037b6827c 100644 --- a/.github/workflows/check-licenses.yml +++ b/.github/workflows/check-licenses.yml @@ -8,7 +8,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: Checkout sources - uses: actions/checkout@v3.3.0 + uses: actions/checkout@v3 - uses: actions/setup-node@v3.7.0 with: node-version: '18.x' diff --git a/.github/workflows/release-40_publish-rc-image.yml b/.github/workflows/release-40_publish-rc-image.yml new file mode 100644 index 
000000000000..a821eaa033fd --- /dev/null +++ b/.github/workflows/release-40_publish-rc-image.yml @@ -0,0 +1,132 @@ +name: Release - Publish RC Container image +# see https://github.com/paritytech/release-engineering/issues/97#issuecomment-1651372277 + +on: + workflow_dispatch: + inputs: + release_id: + description: | + Release ID. + You can find it using the command: + curl -s \ + -H "Authorization: Bearer ${GITHUB_TOKEN}" https://api.github.com/repos/$OWNER/$REPO/releases | \ + jq '.[] | { name: .name, id: .id }' + required: true + type: string + registry: + description: "Container registry" + required: true + type: string + default: docker.io + owner: + description: Owner of the container image repo + required: true + type: string + default: parity + +env: + RELEASE_ID: ${{ inputs.release_id }} + ENGINE: docker + REGISTRY: ${{ inputs.registry }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + DOCKER_OWNER: ${{ inputs.owner || github.repository_owner }} + REPO: ${{ github.repository }} + ARTIFACT_FOLDER: release-artifacts + +jobs: + fetch-artifacts: + runs-on: ubuntu-latest + + steps: + - name: Checkout sources + uses: actions/checkout@v3 + + - name: Fetch all artifacts + run: | + . ./scripts/ci/common/lib.sh + fetch_release_artifacts + + - name: Cache the artifacts + uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 + with: + key: artifacts-${{ github.sha }} + path: | + ${ARTIFACT_FOLDER}/**/* + + build-container: + runs-on: ubuntu-latest + needs: fetch-artifacts + + strategy: + matrix: + binary: ["polkadot", "staking-miner"] + + steps: + - name: Checkout sources + uses: actions/checkout@v3 + + - name: Get artifacts from cache + uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 + with: + key: artifacts-${{ github.sha }} + path: | + ${ARTIFACT_FOLDER}/**/* + + - name: Check sha256 ${{ matrix.binary }} + working-directory: ${ARTIFACT_FOLDER} + run: | + . 
../scripts/ci/common/lib.sh + + echo "Checking binary ${{ matrix.binary }}" + check_sha256 ${{ matrix.binary }} && echo "OK" || echo "ERR" + + - name: Check GPG ${{ matrix.binary }} + working-directory: ${ARTIFACT_FOLDER} + run: | + . ../scripts/ci/common/lib.sh + import_gpg_keys + check_gpg ${{ matrix.binary }} + + - name: Fetch commit and tag + id: fetch_refs + run: | + release=release-${{ inputs.release_id }} && \ + echo "release=${release}" >> $GITHUB_OUTPUT + + commit=$(git rev-parse --short HEAD) && \ + echo "commit=${commit}" >> $GITHUB_OUTPUT + + tag=$(git name-rev --tags --name-only $(git rev-parse HEAD)) && \ + [ "${tag}" != "undefined" ] && echo "tag=${tag}" >> $GITHUB_OUTPUT || \ + echo "No tag, doing without" + + - name: Build Injected Container image for ${{ matrix.binary }} + env: + BIN_FOLDER: ${ARTIFACT_FOLDER} + BINARY: ${{ matrix.binary }} + TAGS: ${{join(steps.fetch_refs.outputs.*, ',')}} + run: | + echo "Building container for ${{ matrix.binary }}" + ./scripts/ci/dockerfiles/build-injected.sh + + - name: Login to Dockerhub + uses: docker/login-action@v2 + with: + username: ${{ inputs.owner }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Push Container image for ${{ matrix.binary }} + id: docker_push + env: + BINARY: ${{ matrix.binary }} + run: | + $ENGINE images | grep ${BINARY} + $ENGINE push --all-tags ${REGISTRY}/${DOCKER_OWNER}/${BINARY} + + - name: Check version for the published image for ${{ matrix.binary }} + env: + BINARY: ${{ matrix.binary }} + RELEASE_TAG: ${{ steps.fetch_refs.outputs.release }} + run: | + echo "Checking tag ${RELEASE_TAG} for image ${REGISTRY}/${DOCKER_OWNER}/${BINARY}" + $ENGINE run -i ${REGISTRY}/${DOCKER_OWNER}/${BINARY}:${RELEASE_TAG} --version diff --git a/.github/workflows/release-50_publish-docker-release.yml b/.github/workflows/release-50_publish-docker-release.yml index a6bf19162a46..81e5caa718f3 100644 --- a/.github/workflows/release-50_publish-docker-release.yml +++ 
b/.github/workflows/release-50_publish-docker-release.yml @@ -30,7 +30,7 @@ jobs: uses: docker/build-push-action@v4 with: push: true - file: scripts/ci/dockerfiles/polkadot_injected_release.Dockerfile + file: scripts/ci/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile tags: | parity/polkadot:latest parity/polkadot:${{ github.event.release.tag_name }} diff --git a/.github/workflows/release-51_publish-docker-manual.yml b/.github/workflows/release-51_publish-docker-manual.yml index 0c973d33b71c..919769f8700d 100644 --- a/.github/workflows/release-51_publish-docker-manual.yml +++ b/.github/workflows/release-51_publish-docker-manual.yml @@ -37,7 +37,7 @@ jobs: uses: docker/build-push-action@v4 with: push: true - file: scripts/ci/dockerfiles/polkadot_injected_release.Dockerfile + file: scripts/ci/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile tags: | parity/polkadot:latest parity/polkadot:${{ github.event.inputs.version }} diff --git a/.gitignore b/.gitignore index 0c6913dac340..61ef9e91a55e 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,7 @@ polkadot.* !polkadot.service .DS_Store .env + +artifacts +release-artifacts +release.json diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 5056012e588e..5a84bbfeba85 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -159,31 +159,39 @@ default: - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 .build-push-image: + variables: + CI_IMAGE: "${BUILDAH_IMAGE}" + + REGISTRY: "docker.io" + DOCKER_OWNER: "paritypr" + DOCKER_USER: "${PARITYPR_USER}" + DOCKER_PASS: "${PARITYPR_PASS}" + IMAGE: "${REGISTRY}/${DOCKER_OWNER}/${IMAGE_NAME}" + + ENGINE: "${BUILDAH_COMMAND}" + BUILDAH_FORMAT: "docker" + SKIP_IMAGE_VALIDATION: 1 + + PROJECT_ROOT: "." 
+ BIN_FOLDER: "./artifacts" + VCS_REF: "${CI_COMMIT_SHA}" + before_script: - !reference [.common-before-script, before_script] - test -s ./artifacts/VERSION || exit 1 - test -s ./artifacts/EXTRATAG || exit 1 - - VERSION="$(cat ./artifacts/VERSION)" + - export VERSION="$(cat ./artifacts/VERSION)" - EXTRATAG="$(cat ./artifacts/EXTRATAG)" - echo "Polkadot version = ${VERSION} (EXTRATAG = ${EXTRATAG})" script: - test "$DOCKER_USER" -a "$DOCKER_PASS" || ( echo "no docker credentials provided"; exit 1 ) - - cd ./artifacts - - $BUILDAH_COMMAND build - --format=docker - --build-arg VCS_REF="${CI_COMMIT_SHA}" - --build-arg BUILD_DATE="$(date -u '+%Y-%m-%dT%H:%M:%SZ')" - --build-arg IMAGE_NAME="${IMAGE_NAME}" - --tag "$IMAGE_NAME:$VERSION" - --tag "$IMAGE_NAME:$EXTRATAG" - --file ${DOCKERFILE} . - # The job will success only on the protected branch + - TAGS="${VERSION},${EXTRATAG}" scripts/ci/dockerfiles/build-injected.sh - echo "$DOCKER_PASS" | - buildah login --username "$DOCKER_USER" --password-stdin docker.io + buildah login --username "$DOCKER_USER" --password-stdin "${REGISTRY}" - $BUILDAH_COMMAND info - - $BUILDAH_COMMAND push --format=v2s2 "$IMAGE_NAME:$VERSION" - - $BUILDAH_COMMAND push --format=v2s2 "$IMAGE_NAME:$EXTRATAG" + - $BUILDAH_COMMAND push --format=v2s2 "$IMAGE:$VERSION" + - $BUILDAH_COMMAND push --format=v2s2 "$IMAGE:$EXTRATAG" after_script: - buildah logout --all diff --git a/doc/docker.md b/doc/docker.md index e8b7fa74732e..f20c2d001edd 100644 --- a/doc/docker.md +++ b/doc/docker.md @@ -1,43 +1,58 @@ -# Using Docker +# Using Containers + +The following commands should work no matter if you use Docker or Podman. In general, Podman is recommended. All commands are "engine neutral" so you can use the container engine of your choice while still being able to copy/paste the commands below. 
+ +Let's start defining Podman as our engine: +``` +ENGINE=podman +``` + +If you prefer to stick with Docker, use: +``` +ENGINE=docker +``` ## The easiest way -The easiest/faster option to run Polkadot in Docker is to use the latest release images. These are small images that use the latest official release of the Polkadot binary, pulled from our package repository. +The easiest/faster option to run Polkadot in Docker is to use the latest release images. These are small images that use the latest official release of the Polkadot binary, pulled from our Debian package. -**_Following examples are running on westend chain and without SSL. They can be used to quick start and learn how Polkadot needs to be configured. Please find out how to secure your node, if you want to operate it on the internet. Do not expose RPC and WS ports, if they are not correctly configured._** +**_The following examples are running on westend chain and without SSL. They can be used to quick start and learn how Polkadot needs to be configured. Please find out how to secure your node, if you want to operate it on the internet. Do not expose RPC and WS ports, if they are not correctly configured._** Let's first check the version we have. The first time you run this command, the Polkadot docker image will be downloaded. This takes a bit of time and bandwidth, be patient: ```bash -docker run --rm -it parity/polkadot:latest --version +$ENGINE run --rm -it parity/polkadot:latest --version ``` You can also pass any argument/flag that Polkadot supports: ```bash -docker run --rm -it parity/polkadot:latest --chain westend --name "PolkaDocker" +$ENGINE run --rm -it parity/polkadot:latest --chain westend --name "PolkaDocker" ``` ## Examples -Once you are done experimenting and picking the best node name :) you can start Polkadot as daemon, exposes the Polkadot ports and mount a volume that will keep your blockchain data locally. 
Make sure that you set the ownership of your local directory to the Polkadot user that is used by the container. Set user id 1000 and group id 1000, by running `chown 1000.1000 /my/local/folder -R` if you use a bind mount. - -To start a Polkadot node on default rpc port 9933 and default p2p port 30333 use the following command. If you want to connect to rpc port 9933, then must add Polkadot startup parameter: `--rpc-external`. +Once you are done experimenting and picking the best node name :) you can start Polkadot as daemon, exposes the Polkadot ports and mount a volume that will keep your blockchain data locally. Make sure that you set the ownership of your local directory to the Polkadot user that is used by the container. -```bash -docker run -d -p 30333:30333 -p 9933:9933 -v /my/local/folder:/polkadot parity/polkadot:latest --chain westend --rpc-external --rpc-cors all -``` +Set user id 1000 and group id 1000, by running `chown 1000.1000 /my/local/folder -R` if you use a bind mount. -Additionally if you want to have custom node name you can add the `--name "YourName"` at the end +To start a Polkadot node on default rpc port 9933 and default p2p port 30333 use the following command. If you want to connect to rpc port 9933, then must add Polkadot startup parameter: `--rpc-external`. 
```bash
-docker run -d -p 30333:30333 -p 9933:9933 -v /my/local/folder:/polkadot parity/polkadot:latest --chain westend --rpc-external --rpc-cors all --name "PolkaDocker"
+$ENGINE run -d -p 30333:30333 -p 9933:9933 \
+ -v /my/local/folder:/polkadot \
+ parity/polkadot:latest \
+ --chain westend --rpc-external --rpc-cors all \
+ --name "PolkaDocker"
```

If you also want to expose the webservice port 9944 use the following command:

```bash
-docker run -d -p 30333:30333 -p 9933:9933 -p 9944:9944 -v /my/local/folder:/polkadot parity/polkadot:latest --chain westend --ws-external --rpc-external --rpc-cors all --name "PolkaDocker"
+$ENGINE run -d -p 30333:30333 -p 9933:9933 -p 9944:9944 \
+ -v /my/local/folder:/polkadot \
+ parity/polkadot:latest \
+ --chain westend --ws-external --rpc-external --rpc-cors all --name "PolkaDocker"
```

## Using Docker compose

@@ -55,17 +70,19 @@ services:
 - 30333:30333 # p2p port
 - 9933:9933 # rpc port
 - 9944:9944 # ws port
+ - 9615:9615 # Prometheus port
 volumes:
 - /my/local/folder:/polkadot
 command: [
 "--name", "PolkaDocker",
 "--ws-external",
 "--rpc-external",
+ "--prometheus-external",
 "--rpc-cors", "all"
 ]
 ```

-With following docker-compose.yml you can set up a node and use polkadot-js-apps as the front end on port 80. After starting the node use a browser and enter your Docker host IP in the URL field: __
+With following `docker-compose.yml` you can set up a node and use polkadot-js-apps as the front end on port 80.
After starting the node use a browser and enter your Docker host IP in the URL field: __ ```bash version: '2' @@ -78,10 +95,12 @@ services: - 30333:30333 # p2p port - 9933:9933 # rpc port - 9944:9944 # ws port + - 9615:9615 # Prometheus port command: [ "--name", "PolkaDocker", "--ws-external", "--rpc-external", + "--prometheus-external", "--rpc-cors", "all" ] @@ -100,27 +119,30 @@ Chain syncing will utilize all available memory and CPU power your server has to If running on a low resource VPS, use `--memory` and `--cpus` to limit the resources used. E.g. To allow a maximum of 512MB memory and 50% of 1 CPU, use `--cpus=".5" --memory="512m"`. Read more about limiting a container's resources [here](https://docs.docker.com/config/containers/resource_constraints). -Start a shell session with the daemon: -```bash -docker exec -it $(docker ps -q) bash; -``` +## Build your own image -Check the current version: +There are 3 options to build a polkadot container image: +- using the builder image +- using the injected "Debian" image +- using the generic injected image -```bash -polkadot --version -``` +### Builder image -## Build your own image +To get up and running with the smallest footprint on your system, you may use an existing Polkadot Container image. -To get up and running with the smallest footprint on your system, you may use the Polkadot Docker image. -You can build it yourself (it takes a while...) in the shell session of the daemon: +You may also build a polkadot container image yourself (it takes a while...) using the container specs `scripts/ci/dockerfiles/polkadot/polkadot_builder.Dockerfile`. -```bash -cd scripts/ci/dockerfiles/polkadot -./build.sh -``` +### Debian injected + +The Debian injected image is how the official polkadot container image is produced. It relies on the Debian package that is published upon each release. The Debian injected image is usually available a few minutes after a new release is published. 
+It has the benefit of relying on the GPG signatures embedded in the Debian package. + +### Generic injected + +For simple testing purposes, the easiest option for polkadot and also random binaries, is to use the `binary_injected.Dockerfile` container spec. This option is less secure since the injected binary is not checked at all but it has the benefit to be simple. This option requires to already have a valid `polkadot` binary, compiled for Linux. + +This binary is then simply copied inside the `parity/base-bin` image. ## Reporting issues @@ -128,8 +150,8 @@ If you run into issues with Polkadot when using docker, please run the following (replace the tag with the appropriate one if you do not use latest): ```bash -docker run --rm -it parity/polkadot:latest --version +$ENGINE run --rm -it parity/polkadot:latest --version ``` This will show you the Polkadot version as well as the git commit ref that was used to build your container. -Just paste that in the issue you create. +You can now paste the version information in a [new issue](https://github.com/paritytech/polkadot/issues/new/choose). 
diff --git a/scripts/ci/common/lib.sh b/scripts/ci/common/lib.sh index 2e94feb150ce..00abe9a1d8d4 100755 --- a/scripts/ci/common/lib.sh +++ b/scripts/ci/common/lib.sh @@ -193,3 +193,73 @@ check_bootnode(){ echo " Bootnode appears unreachable" return 1 } + +# Assumes the ENV are set: +# - RELEASE_ID +# - GITHUB_TOKEN +# - REPO in the form paritytech/polkadot +fetch_release_artifacts() { + echo "Release ID : $RELEASE_ID" + echo "Repo : $REPO" + echo "ARTIFACT_FOLDER: $ARTIFACT_FOLDER" + + curl -L -s \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${GITHUB_TOKEN}" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + https://api.github.com/repos/${REPO}/releases/$RELEASE_ID > release.json + + # Get Asset ids + ids=($(jq -r '.assets[].id' < release.json )) + count=$(jq '.assets|length' < release.json ) + + # Fetch artifacts + mkdir -p ${ARTIFACT_FOLDER} + pushd ${ARTIFACT_FOLDER} > /dev/null + + iter=1 + for id in "${ids[@]}" + do + echo " - $iter/$count: downloading asset id: $id..." 
+ curl -s -OJ -L -H "Accept: application/octet-stream" \ + -H "Authorization: Token ${GITHUB_TOKEN}" \ + "https://api.github.com/repos/${REPO}/releases/assets/$id" + iter=$((iter + 1)) + done + + ls -al --color + popd > /dev/null +} + +# Check the checksum for a given binary +function check_sha256() { + echo "Checking SHA256 for $1" + shasum -qc $1.sha256 +} + +# Import GPG keys of the release team members +# This is done in parallel as it can take a while sometimes +function import_gpg_keys() { + GPG_KEYSERVER=${GPG_KEYSERVER:-"keyserver.ubuntu.com"} + SEC="9D4B2B6EB8F97156D19669A9FF0812D491B96798" + WILL="2835EAF92072BC01D188AF2C4A092B93E97CE1E2" + EGOR="E6FC4D4782EB0FA64A4903CCDB7D3555DD3932D3" + MARA="533C920F40E73A21EEB7E9EBF27AEA7E7594C9CF" + MORGAN="2E92A9D8B15D7891363D1AE8AF9E6C43F7F8C4CF" + + echo "Importing GPG keys from $GPG_KEYSERVER in parallel" + for key in $SEC $WILL $EGOR $MARA $MORGAN; do + ( + echo "Importing GPG key $key" + gpg --no-tty --quiet --keyserver $GPG_KEYSERVER --recv-keys $key + echo -e "5\ny\n" | gpg --no-tty --command-fd 0 --expert --edit-key $key trust; + ) & + done + wait +} + +# Check the GPG signature for a given binary +function check_gpg() { + echo "Checking GPG Signature for $1" + gpg --no-tty --verify -q $1.asc $1 +} diff --git a/scripts/ci/dockerfiles/adder-collator/build-injected.sh b/scripts/ci/dockerfiles/adder-collator/build-injected.sh new file mode 100755 index 000000000000..9a1857bc7ab4 --- /dev/null +++ b/scripts/ci/dockerfiles/adder-collator/build-injected.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +# Sample call: +# $0 /path/to/folder_with_binary +# This script replace the former dedicated Dockerfile +# and shows how to use the generic binary_injected.dockerfile + +PROJECT_ROOT=`git rev-parse --show-toplevel` + +export BINARY=adder-collator,undying-collator +export BIN_FOLDER=$1 + +$PROJECT_ROOT/scripts/ci/dockerfiles/build-injected.sh diff --git a/scripts/ci/dockerfiles/adder-collator/test-build.sh 
b/scripts/ci/dockerfiles/adder-collator/test-build.sh new file mode 100755 index 000000000000..171e0309f807 --- /dev/null +++ b/scripts/ci/dockerfiles/adder-collator/test-build.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +TMP=$(mktemp -d) +ENGINE=${ENGINE:-podman} + +# TODO: Switch to /bin/bash when the image is built from parity/base-bin + +# Fetch some binaries +$ENGINE run --user root --rm -i \ + --pull always \ + -v "$TMP:/export" \ + --entrypoint /usr/bin/bash \ + paritypr/colander:master -c \ + 'cp "$(which adder-collator)" /export' + +$ENGINE run --user root --rm -i \ + --pull always \ + -v "$TMP:/export" \ + --entrypoint /usr/bin/bash \ + paritypr/colander:master -c \ + 'cp "$(which undying-collator)" /export' + +./build-injected.sh $TMP diff --git a/scripts/ci/dockerfiles/binary_injected.Dockerfile b/scripts/ci/dockerfiles/binary_injected.Dockerfile new file mode 100644 index 000000000000..cee81a2eb8ae --- /dev/null +++ b/scripts/ci/dockerfiles/binary_injected.Dockerfile @@ -0,0 +1,48 @@ +FROM docker.io/parity/base-bin + +# This file allows building a Generic container image +# based on one or multiple pre-built Linux binaries. +# Some defaults are set to polkadot but all can be overriden. + +SHELL ["/bin/bash", "-c"] + +# metadata +ARG VCS_REF +ARG BUILD_DATE +ARG IMAGE_NAME + +# That can be a single one or a comma separated list +ARG BINARY=polkadot + +ARG BIN_FOLDER=. 
+ARG DOC_URL=https://github.com/paritytech/polkadot +ARG DESCRIPTION="Polkadot: a platform for web3" +ARG AUTHORS="devops-team@parity.io" +ARG VENDOR="Parity Technologies" + +LABEL io.parity.image.authors=${AUTHORS} \ + io.parity.image.vendor="${VENDOR}" \ + io.parity.image.revision="${VCS_REF}" \ + io.parity.image.title="${IMAGE_NAME}" \ + io.parity.image.created="${BUILD_DATE}" \ + io.parity.image.documentation="${DOC_URL}" \ + io.parity.image.description="${DESCRIPTION}" \ + io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/binary_injected.Dockerfile" + +USER root +WORKDIR /app + +# add polkadot binary to docker image +# sample for polkadot: COPY ./polkadot ./polkadot-*-worker /usr/local/bin/ +COPY entrypoint.sh . +COPY "bin/*" "/usr/local/bin/" +RUN chmod -R a+rx "/usr/local/bin" + +USER parity +ENV BINARY=${BINARY} + +# ENTRYPOINT +ENTRYPOINT ["/app/entrypoint.sh"] + +# We call the help by default +CMD ["--help"] diff --git a/scripts/ci/dockerfiles/build-injected.sh b/scripts/ci/dockerfiles/build-injected.sh new file mode 100755 index 000000000000..d0e7fee3646e --- /dev/null +++ b/scripts/ci/dockerfiles/build-injected.sh @@ -0,0 +1,92 @@ +#!/usr/bin/env bash +set -e + +# This script allows building a Container Image from a Linux +# binary that is injected into a base-image. 
+
+ENGINE=${ENGINE:-podman}
+
+if [ "$ENGINE" == "podman" ]; then
+ PODMAN_FLAGS="--format docker"
+else
+ PODMAN_FLAGS=""
+fi
+
+CONTEXT=$(mktemp -d)
+REGISTRY=${REGISTRY:-docker.io}
+
+# The following line ensures we know the project root
+PROJECT_ROOT=${PROJECT_ROOT:-$(git rev-parse --show-toplevel)}
+DOCKERFILE=${DOCKERFILE:-$PROJECT_ROOT/scripts/ci/dockerfiles/binary_injected.Dockerfile}
+VERSION_TOML=$(grep "^version " $PROJECT_ROOT/Cargo.toml | grep -oE "([0-9\.]+-?[0-9]+)")
+
+# The following VARs have defaults that can be overridden
+DOCKER_OWNER=${DOCKER_OWNER:-parity}
+
+# We may get 1..n binaries, comma separated
+BINARY=${BINARY:-polkadot}
+IFS=',' read -r -a BINARIES <<< "$BINARY"
+
+VERSION=${VERSION:-$VERSION_TOML}
+BIN_FOLDER=${BIN_FOLDER:-.}
+
+IMAGE=${IMAGE:-${REGISTRY}/${DOCKER_OWNER}/${BINARIES[0]}}
+DESCRIPTION_DEFAULT="Injected Container image built for ${BINARY}"
+DESCRIPTION=${DESCRIPTION:-$DESCRIPTION_DEFAULT}
+
+VCS_REF=${VCS_REF:-01234567}
+
+# Build the image
+echo "Using engine: $ENGINE"
+echo "Using Dockerfile: $DOCKERFILE"
+echo "Using context: $CONTEXT"
+echo "Building ${IMAGE}:latest container image for ${BINARY} v${VERSION} from ${BIN_FOLDER} hang on!"
+echo "BIN_FOLDER=$BIN_FOLDER" +echo "CONTEXT=$CONTEXT" + +# We need all binaries and resources available in the Container build "CONTEXT" +mkdir -p $CONTEXT/bin +for bin in "${BINARIES[@]}" +do + echo "Copying $BIN_FOLDER/$bin to context: $CONTEXT/bin" + cp "$BIN_FOLDER/$bin" "$CONTEXT/bin" +done + +cp "$PROJECT_ROOT/scripts/ci/dockerfiles/entrypoint.sh" "$CONTEXT" + +echo "Building image: ${IMAGE}" + +TAGS=${TAGS[@]:-latest} +IFS=',' read -r -a TAG_ARRAY <<< "$TAGS" +TAG_ARGS=" " + +echo "The image ${IMAGE} will be tagged with ${TAG_ARRAY[*]}" +for tag in "${TAG_ARRAY[@]}"; do + TAG_ARGS+="--tag ${IMAGE}:${tag} " +done + +echo "$TAG_ARGS" + +# time \ +$ENGINE build \ + ${PODMAN_FLAGS} \ + --build-arg VCS_REF="${VCS_REF}" \ + --build-arg BUILD_DATE=$(date -u '+%Y-%m-%dT%H:%M:%SZ') \ + --build-arg IMAGE_NAME="${IMAGE}" \ + --build-arg BINARY="${BINARY}" \ + --build-arg BIN_FOLDER="${BIN_FOLDER}" \ + --build-arg DESCRIPTION="${DESCRIPTION}" \ + ${TAG_ARGS} \ + -f "${DOCKERFILE}" \ + ${CONTEXT} + +echo "Your Container image for ${IMAGE} is ready" +$ENGINE images + +if [[ -z "${SKIP_IMAGE_VALIDATION}" ]]; then + echo "Check the image ${IMAGE}:${TAG_ARRAY[0]}" + $ENGINE run --rm -i "${IMAGE}:${TAG_ARRAY[0]}" --version + + echo "Query binaries" + $ENGINE run --rm -i --entrypoint /bin/bash "${IMAGE}:${TAG_ARRAY[0]}" -c 'echo BINARY: $BINARY' +fi diff --git a/scripts/ci/dockerfiles/collator_injected.Dockerfile b/scripts/ci/dockerfiles/collator_injected.Dockerfile deleted file mode 100644 index 91b8cb0057bf..000000000000 --- a/scripts/ci/dockerfiles/collator_injected.Dockerfile +++ /dev/null @@ -1,49 +0,0 @@ -# this file copies from scripts/ci/dockerfiles/Dockerfile and changes only the binary name -FROM docker.io/library/ubuntu:20.04 - -# metadata -ARG VCS_REF -ARG BUILD_DATE -ARG IMAGE_NAME - -LABEL io.parity.image.authors="devops-team@parity.io" \ - io.parity.image.vendor="Parity Technologies" \ - io.parity.image.title="${IMAGE_NAME}" \ - 
io.parity.image.description="Injected adder-collator Docker image" \ - io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/collator_injected.Dockerfile" \ - io.parity.image.revision="${VCS_REF}" \ - io.parity.image.created="${BUILD_DATE}" \ - io.parity.image.documentation="https://github.com/paritytech/polkadot/" - -# show backtraces -ENV RUST_BACKTRACE 1 - -# install tools and dependencies -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y \ - libssl1.1 \ - ca-certificates && \ -# apt cleanup - apt-get autoremove -y && \ - apt-get clean && \ - find /var/lib/apt/lists/ -type f -not -name lock -delete; \ -# add user and link ~/.local/share/adder-collator to /data - useradd -m -u 1000 -U -s /bin/sh -d /adder-collator adder-collator && \ - mkdir -p /data /adder-collator/.local/share && \ - chown -R adder-collator:adder-collator /data && \ - ln -s /data /adder-collator/.local/share/polkadot - -# add adder-collator binary to docker image -COPY ./adder-collator /usr/local/bin -COPY ./undying-collator /usr/local/bin - -USER adder-collator - -# check if executable works in this container -RUN /usr/local/bin/adder-collator --version -RUN /usr/local/bin/undying-collator --version - -EXPOSE 30333 9933 9944 -VOLUME ["/adder-collator"] - -ENTRYPOINT ["/usr/local/bin/adder-collator"] diff --git a/scripts/ci/dockerfiles/entrypoint.sh b/scripts/ci/dockerfiles/entrypoint.sh new file mode 100755 index 000000000000..eaa815faf6a4 --- /dev/null +++ b/scripts/ci/dockerfiles/entrypoint.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +# Sanity check +if [ -z "$BINARY" ] +then + echo "BINARY ENV not defined, this should never be the case. Aborting..." 
+ exit 1 +fi + +# If the user built the image with multiple binaries, +# we consider the first one to be the canonical one +# To start with another binary, the user can either: +# - use the --entrypoint option +# - pass the ENV BINARY with a single binary +IFS=',' read -r -a BINARIES <<< "$BINARY" +BIN0=${BINARIES[0]} +echo "Starting binary $BIN0" +$BIN0 $@ diff --git a/scripts/ci/dockerfiles/malus/build-injected.sh b/scripts/ci/dockerfiles/malus/build-injected.sh new file mode 100755 index 000000000000..99bd5fde1d5a --- /dev/null +++ b/scripts/ci/dockerfiles/malus/build-injected.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +# Sample call: +# $0 /path/to/folder_with_binary +# This script replace the former dedicated Dockerfile +# and shows how to use the generic binary_injected.dockerfile + +PROJECT_ROOT=`git rev-parse --show-toplevel` + +export BINARY=malus,polkadot-execute-worker,polkadot-prepare-worker +export BIN_FOLDER=$1 +# export TAGS=... + +$PROJECT_ROOT/scripts/ci/dockerfiles/build-injected.sh diff --git a/scripts/ci/dockerfiles/malus/test-build.sh b/scripts/ci/dockerfiles/malus/test-build.sh new file mode 100755 index 000000000000..3114e9e2adf1 --- /dev/null +++ b/scripts/ci/dockerfiles/malus/test-build.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +TMP=$(mktemp -d) +ENGINE=${ENGINE:-podman} + +export TAGS=latest,beta,7777,1.0.2-rc23 + +# Fetch some binaries +$ENGINE run --user root --rm -i \ + --pull always \ + -v "$TMP:/export" \ + --entrypoint /bin/bash \ + paritypr/malus:7217 -c \ + 'cp "$(which malus)" /export' + +echo "Checking binaries we got:" +ls -al $TMP + +./build-injected.sh $TMP diff --git a/scripts/ci/dockerfiles/malus_injected.Dockerfile b/scripts/ci/dockerfiles/malus_injected.Dockerfile deleted file mode 100644 index fa429b5f142a..000000000000 --- a/scripts/ci/dockerfiles/malus_injected.Dockerfile +++ /dev/null @@ -1,50 +0,0 @@ -FROM debian:bullseye-slim - -# metadata -ARG VCS_REF -ARG BUILD_DATE -ARG IMAGE_NAME - -LABEL 
io.parity.image.authors="devops-team@parity.io" \ - io.parity.image.vendor="Parity Technologies" \ - io.parity.image.title="${IMAGE_NAME}" \ - io.parity.image.description="Malus - the nemesis of polkadot" \ - io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/malus.Dockerfile" \ - io.parity.image.revision="${VCS_REF}" \ - io.parity.image.created="${BUILD_DATE}" \ - io.parity.image.documentation="https://github.com/paritytech/polkadot/" - -# show backtraces -ENV RUST_BACKTRACE 1 - -# install tools and dependencies -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y \ - ca-certificates \ - curl \ - libssl1.1 \ - tini && \ -# apt cleanup - apt-get autoremove -y && \ - apt-get clean && \ - find /var/lib/apt/lists/ -type f -not -name lock -delete; \ -# add user - groupadd --gid 10000 nonroot && \ - useradd --home-dir /home/nonroot \ - --create-home \ - --shell /bin/bash \ - --gid nonroot \ - --groups nonroot \ - --uid 10000 nonroot - - -# add malus binary to docker image -COPY ./malus ./polkadot-execute-worker ./polkadot-prepare-worker /usr/local/bin - -USER nonroot - -# check if executable works in this container -RUN /usr/local/bin/malus --version - -# Tini allows us to avoid several Docker edge cases, see https://github.com/krallin/tini. -ENTRYPOINT ["tini", "--", "/bin/bash"] diff --git a/scripts/ci/dockerfiles/polkadot/README.md b/scripts/ci/dockerfiles/polkadot/README.md index 9ddf324bb29c..e331d8984c2c 100644 --- a/scripts/ci/dockerfiles/polkadot/README.md +++ b/scripts/ci/dockerfiles/polkadot/README.md @@ -1,7 +1,9 @@ # Self built Docker image The Polkadot repo contains several options to build Docker images for Polkadot. + This folder contains a self-contained image that does not require a Linux pre-built binary. + Instead, building the image is possible on any host having docker installed and will build Polkadot inside Docker. 
That also means that no Rust toolchain is required on the host machine for the build to succeed. diff --git a/scripts/ci/dockerfiles/polkadot/build-injected.sh b/scripts/ci/dockerfiles/polkadot/build-injected.sh new file mode 100755 index 000000000000..22774c7b7122 --- /dev/null +++ b/scripts/ci/dockerfiles/polkadot/build-injected.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +# Sample call: +# $0 /path/to/folder_with_binary +# This script replace the former dedicated Dockerfile +# and shows how to use the generic binary_injected.dockerfile + +PROJECT_ROOT=`git rev-parse --show-toplevel` + +export BINARY=polkadot,polkadot-execute-worker,polkadot-prepare-worker +export BIN_FOLDER=$1 + +$PROJECT_ROOT/scripts/ci/dockerfiles/build-injected.sh diff --git a/scripts/ci/dockerfiles/polkadot/build.sh b/scripts/ci/dockerfiles/polkadot/build.sh deleted file mode 100755 index d00c9108bd8c..000000000000 --- a/scripts/ci/dockerfiles/polkadot/build.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash -set -e - -pushd . - -# The following line ensure we run from the project root -PROJECT_ROOT=`git rev-parse --show-toplevel` -cd $PROJECT_ROOT - -# Find the current version from Cargo.toml -VERSION=`grep "^version" ./cli/Cargo.toml | egrep -o "([0-9\.]+-?[0-9]+)"` -GITUSER=parity -GITREPO=polkadot - -# Build the image -echo "Building ${GITUSER}/${GITREPO}:latest docker image, hang on!" -time docker build \ - -f ./scripts/ci/dockerfiles/polkadot/polkadot_builder.Dockerfile \ - -t ${GITUSER}/${GITREPO}:latest \ - -t ${GITUSER}/${GITREPO}:v${VERSION} \ - . 
- -# Show the list of available images for this repo -echo "Your Docker image for $GITUSER/$GITREPO is ready" -docker images | grep ${GITREPO} - -popd diff --git a/scripts/ci/dockerfiles/polkadot/docker-compose.yml b/scripts/ci/dockerfiles/polkadot/docker-compose.yml index 978191af88c1..524b1164796a 100644 --- a/scripts/ci/dockerfiles/polkadot/docker-compose.yml +++ b/scripts/ci/dockerfiles/polkadot/docker-compose.yml @@ -1,23 +1,22 @@ version: '3' services: polkadot: + image: parity/polkadot:latest + ports: - "127.0.0.1:30333:30333/tcp" - "127.0.0.1:9933:9933/tcp" - image: parity/polkadot:latest + - "127.0.0.1:9944:9944/tcp" + - "127.0.0.1:9615:9615/tcp" + volumes: - "polkadot-data:/data" + command: | --unsafe-rpc-external --unsafe-ws-external --rpc-cors all --prometheus-external - ports: - - "30333:30333" - - "9933:9933" - - "9944:9944" - - "9615:9615" - volumes: polkadot-data: diff --git a/scripts/ci/dockerfiles/polkadot_Dockerfile.README.md b/scripts/ci/dockerfiles/polkadot/polkadot_Dockerfile.README.md similarity index 100% rename from scripts/ci/dockerfiles/polkadot_Dockerfile.README.md rename to scripts/ci/dockerfiles/polkadot/polkadot_Dockerfile.README.md diff --git a/scripts/ci/dockerfiles/polkadot/polkadot_builder.Dockerfile b/scripts/ci/dockerfiles/polkadot/polkadot_builder.Dockerfile index 6e31298432f7..f263c836bbfe 100644 --- a/scripts/ci/dockerfiles/polkadot/polkadot_builder.Dockerfile +++ b/scripts/ci/dockerfiles/polkadot/polkadot_builder.Dockerfile @@ -7,7 +7,7 @@ COPY . /polkadot RUN cargo build --locked --release # This is the 2nd stage: a very small image where we copy the Polkadot binary." 
-FROM docker.io/library/ubuntu:20.04 +FROM docker.io/parity/base-bin:latest LABEL description="Multistage Docker image for Polkadot: a platform for web3" \ io.parity.image.type="builder" \ diff --git a/scripts/ci/dockerfiles/polkadot_injected_release.Dockerfile b/scripts/ci/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile similarity index 95% rename from scripts/ci/dockerfiles/polkadot_injected_release.Dockerfile rename to scripts/ci/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile index 74b5c7f48f88..e2c72dcfe2e9 100644 --- a/scripts/ci/dockerfiles/polkadot_injected_release.Dockerfile +++ b/scripts/ci/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile @@ -11,7 +11,7 @@ LABEL io.parity.image.authors="devops-team@parity.io" \ io.parity.image.vendor="Parity Technologies" \ io.parity.image.title="parity/polkadot" \ io.parity.image.description="Polkadot: a platform for web3. This is the official Parity image with an injected binary." \ - io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/polkadot_injected_release.Dockerfile" \ + io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile" \ io.parity.image.revision="${VCS_REF}" \ io.parity.image.created="${BUILD_DATE}" \ io.parity.image.documentation="https://github.com/paritytech/polkadot/" diff --git a/scripts/ci/dockerfiles/polkadot/test-build.sh b/scripts/ci/dockerfiles/polkadot/test-build.sh new file mode 100755 index 000000000000..d2d904561cb5 --- /dev/null +++ b/scripts/ci/dockerfiles/polkadot/test-build.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +TMP=$(mktemp -d) +ENGINE=${ENGINE:-podman} + +# You need to build an injected image first + +# Fetch some binaries +$ENGINE run --user root --rm -i \ + -v "$TMP:/export" \ + --entrypoint /bin/bash \ + parity/polkadot -c \ + 'cp "$(which polkadot)" /export' + +echo "Checking binaries we got:" +tree $TMP + 
+./build-injected.sh $TMP diff --git a/scripts/ci/dockerfiles/polkadot_injected_debug.Dockerfile b/scripts/ci/dockerfiles/polkadot_injected_debug.Dockerfile deleted file mode 100644 index aebbbdcf1b7f..000000000000 --- a/scripts/ci/dockerfiles/polkadot_injected_debug.Dockerfile +++ /dev/null @@ -1,48 +0,0 @@ -FROM docker.io/library/ubuntu:20.04 - -# metadata -ARG VCS_REF -ARG BUILD_DATE -ARG IMAGE_NAME - -LABEL io.parity.image.authors="devops-team@parity.io" \ - io.parity.image.vendor="Parity Technologies" \ - io.parity.image.title="${IMAGE_NAME}" \ - io.parity.image.description="Polkadot: a platform for web3" \ - io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/polkadot_injected_debug.Dockerfile" \ - io.parity.image.revision="${VCS_REF}" \ - io.parity.image.created="${BUILD_DATE}" \ - io.parity.image.documentation="https://github.com/paritytech/polkadot/" - -# show backtraces -ENV RUST_BACKTRACE 1 - -# install tools and dependencies -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y \ - libssl1.1 \ - ca-certificates && \ -# apt cleanup - apt-get autoremove -y && \ - apt-get clean && \ - find /var/lib/apt/lists/ -type f -not -name lock -delete; \ -# add user and link ~/.local/share/polkadot to /data - useradd -m -u 1000 -U -s /bin/sh -d /polkadot polkadot && \ - mkdir -p /data /polkadot/.local/share && \ - chown -R polkadot:polkadot /data && \ - ln -s /data /polkadot/.local/share/polkadot - -# add polkadot binary to docker image -COPY ./polkadot ./polkadot-execute-worker ./polkadot-prepare-worker /usr/local/bin - -USER polkadot - -# check if executable works in this container -RUN /usr/local/bin/polkadot --version -RUN /usr/local/bin/polkadot-execute-worker --version -RUN /usr/local/bin/polkadot-prepare-worker --version - -EXPOSE 30333 9933 9944 -VOLUME ["/polkadot"] - -ENTRYPOINT ["/usr/local/bin/polkadot"] diff --git a/scripts/ci/dockerfiles/staking-miner/README.md 
b/scripts/ci/dockerfiles/staking-miner/README.md new file mode 100644 index 000000000000..3610e1130316 --- /dev/null +++ b/scripts/ci/dockerfiles/staking-miner/README.md @@ -0,0 +1,37 @@ +# staking-miner container image + +## Build using the Builder + +``` +./build.sh +``` + +## Build the injected Image + +You first need a valid Linux binary to inject. Let's assume this binary is located in `BIN_FOLDER`. + +``` +./build-injected.sh "$BIN_FOLDER" +``` + +## Test + +Here is how to test the image. We can generate a valid seed but the staking-miner will quickly notice that our +account is not funded and "does not exist". + +You may pass any ENV supported by the binary and must provide at least a few such as `SEED` and `URI`: +``` +ENV SEED="" +ENV URI="wss://rpc.polkadot.io:443" +ENV RUST_LOG="info" +``` + +``` +export SEED=$(subkey generate -n polkadot --output-type json | jq -r .secretSeed) +podman run --rm -it \ + -e URI="wss://rpc.polkadot.io:443" \ + -e RUST_LOG="info" \ + -e SEED \ + localhost/parity/staking-miner \ + dry-run seq-phragmen +``` diff --git a/scripts/ci/dockerfiles/staking-miner/build-injected.sh b/scripts/ci/dockerfiles/staking-miner/build-injected.sh new file mode 100755 index 000000000000..536636df6a91 --- /dev/null +++ b/scripts/ci/dockerfiles/staking-miner/build-injected.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +# Sample call: +# $0 /path/to/folder_with_staking-miner_binary +# This script replace the former dedicated staking-miner "injected" Dockerfile +# and shows how to use the generic binary_injected.dockerfile + +PROJECT_ROOT=`git rev-parse --show-toplevel` + +export BINARY=staking-miner +export BIN_FOLDER=$1 + +$PROJECT_ROOT/scripts/ci/dockerfiles/build-injected.sh diff --git a/scripts/ci/dockerfiles/staking-miner/build.sh b/scripts/ci/dockerfiles/staking-miner/build.sh new file mode 100755 index 000000000000..67c82afcd2ce --- /dev/null +++ b/scripts/ci/dockerfiles/staking-miner/build.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +# Sample 
call: +# $0 /path/to/folder_with_staking-miner_binary +# This script replaces the former dedicated staking-miner "injected" Dockerfile +# and shows how to use the generic binary_injected.dockerfile + +PROJECT_ROOT=`git rev-parse --show-toplevel` +ENGINE=podman + +echo "Building the staking-miner using the Builder image" +echo "PROJECT_ROOT=$PROJECT_ROOT" +$ENGINE build -t staking-miner -f staking-miner_builder.Dockerfile "$PROJECT_ROOT" diff --git a/scripts/ci/dockerfiles/staking-miner/staking-miner_builder.Dockerfile b/scripts/ci/dockerfiles/staking-miner/staking-miner_builder.Dockerfile index a1932095fd4c..0ae77f36c79d 100644 --- a/scripts/ci/dockerfiles/staking-miner/staking-miner_builder.Dockerfile +++ b/scripts/ci/dockerfiles/staking-miner/staking-miner_builder.Dockerfile @@ -4,17 +4,17 @@ FROM paritytech/ci-linux:production as builder ARG VCS_REF ARG BUILD_DATE ARG IMAGE_NAME="staking-miner" -ARG PROFILE=release +ARG PROFILE=production LABEL description="This is the build stage. Here we create the binary." WORKDIR /app COPY . /app -RUN cargo build --locked --$PROFILE --package staking-miner +RUN cargo build --locked --profile $PROFILE --package staking-miner # ===== SECOND STAGE ====== -FROM docker.io/library/ubuntu:20.04 +FROM docker.io/parity/base-bin:latest LABEL description="This is the 2nd stage: a very small image where we copy the binary." 
LABEL io.parity.image.authors="devops-team@parity.io" \ io.parity.image.vendor="Parity Technologies" \ @@ -28,13 +28,10 @@ LABEL io.parity.image.authors="devops-team@parity.io" \ ARG PROFILE=release COPY --from=builder /app/target/$PROFILE/staking-miner /usr/local/bin -RUN useradd -u 1000 -U -s /bin/sh miner && \ - rm -rf /usr/bin /usr/sbin - # show backtraces ENV RUST_BACKTRACE 1 -USER miner +USER parity ENV SEED="" ENV URI="wss://rpc.polkadot.io" diff --git a/scripts/ci/dockerfiles/staking-miner/staking-miner_injected.Dockerfile b/scripts/ci/dockerfiles/staking-miner/staking-miner_injected.Dockerfile deleted file mode 100644 index 4901ab4a3736..000000000000 --- a/scripts/ci/dockerfiles/staking-miner/staking-miner_injected.Dockerfile +++ /dev/null @@ -1,43 +0,0 @@ -FROM docker.io/library/ubuntu:20.04 - -# metadata -ARG VCS_REF -ARG BUILD_DATE -ARG IMAGE_NAME="staking-miner" - -LABEL io.parity.image.authors="devops-team@parity.io" \ - io.parity.image.vendor="Parity Technologies" \ - io.parity.image.title="${IMAGE_NAME}" \ - io.parity.image.description="${IMAGE_NAME} for substrate based chains" \ - io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/${IMAGE_NAME}/${IMAGE_NAME}_injected.Dockerfile" \ - io.parity.image.revision="${VCS_REF}" \ - io.parity.image.created="${BUILD_DATE}" \ - io.parity.image.documentation="https://github.com/paritytech/polkadot/" - -# show backtraces -ENV RUST_BACKTRACE 1 - -# install tools and dependencies -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y \ - libssl1.1 \ - ca-certificates && \ -# apt cleanup - apt-get autoremove -y && \ - apt-get clean && \ - find /var/lib/apt/lists/ -type f -not -name lock -delete; \ - useradd -u 1000 -U -s /bin/sh miner - -# add binary to docker image -COPY ./staking-miner /usr/local/bin - -USER miner - -ENV SEED="" -ENV URI="wss://rpc.polkadot.io" -ENV RUST_LOG="info" - -# check if the binary works in this container -RUN 
/usr/local/bin/staking-miner --version - -ENTRYPOINT [ "/usr/local/bin/staking-miner" ] diff --git a/scripts/ci/dockerfiles/staking-miner/test-build.sh b/scripts/ci/dockerfiles/staking-miner/test-build.sh new file mode 100755 index 000000000000..0ce74e2df296 --- /dev/null +++ b/scripts/ci/dockerfiles/staking-miner/test-build.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +TMP=$(mktemp -d) +ENGINE=${ENGINE:-podman} + +# You need to build an injected image first + +# Fetch some binaries +$ENGINE run --user root --rm -i \ + -v "$TMP:/export" \ + --entrypoint /bin/bash \ + parity/staking-miner -c \ + 'cp "$(which staking-miner)" /export' + +echo "Checking binaries we got:" +tree $TMP + +./build-injected.sh $TMP diff --git a/scripts/ci/gitlab/pipeline/build.yml b/scripts/ci/gitlab/pipeline/build.yml index dafca393cd4f..845ac7970108 100644 --- a/scripts/ci/gitlab/pipeline/build.yml +++ b/scripts/ci/gitlab/pipeline/build.yml @@ -39,7 +39,6 @@ build-linux-stable: - echo -n ${CI_JOB_ID} > ./artifacts/BUILD_LINUX_JOB_ID - RELEASE_VERSION=$(./artifacts/polkadot -V | awk '{print $2}'| awk -F "-" '{print $1}') - echo -n "v${RELEASE_VERSION}" > ./artifacts/BUILD_RELEASE_VERSION - - cp -r scripts/* ./artifacts build-test-collators: stage: build @@ -64,7 +63,6 @@ build-test-collators: - echo -n "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}" > ./artifacts/EXTRATAG - echo "adder-collator version = $(cat ./artifacts/VERSION) (EXTRATAG = $(cat ./artifacts/EXTRATAG))" - echo "undying-collator version = $(cat ./artifacts/VERSION) (EXTRATAG = $(cat ./artifacts/EXTRATAG))" - - cp -r ./scripts/* ./artifacts build-malus: stage: build @@ -88,7 +86,6 @@ build-malus: - echo -n "${CI_COMMIT_REF_NAME}" > ./artifacts/VERSION - echo -n "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}" > ./artifacts/EXTRATAG - echo "polkadot-test-malus = $(cat ./artifacts/VERSION) (EXTRATAG = $(cat ./artifacts/EXTRATAG))" - - cp -r ./scripts/* ./artifacts build-staking-miner: stage: build @@ -110,7 +107,6 @@ 
build-staking-miner: - echo -n "${CI_COMMIT_REF_NAME}" > ./artifacts/VERSION - echo -n "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}" > ./artifacts/EXTRATAG - echo "staking-miner = $(cat ./artifacts/VERSION) (EXTRATAG = $(cat ./artifacts/EXTRATAG))" - - cp -r ./scripts/* ./artifacts build-rustdoc: stage: build diff --git a/scripts/ci/gitlab/pipeline/publish.yml b/scripts/ci/gitlab/pipeline/publish.yml index d9a0dff95767..c224094125e3 100644 --- a/scripts/ci/gitlab/pipeline/publish.yml +++ b/scripts/ci/gitlab/pipeline/publish.yml @@ -19,20 +19,16 @@ publish-polkadot-debug-image: - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 variables: - CI_IMAGE: ${BUILDAH_IMAGE} - GIT_STRATEGY: none - DOCKER_USER: ${PARITYPR_USER} - DOCKER_PASS: ${PARITYPR_PASS} - # scripts/ci/dockerfiles/polkadot_injected_debug.Dockerfile - DOCKERFILE: ci/dockerfiles/polkadot_injected_debug.Dockerfile - IMAGE_NAME: docker.io/paritypr/polkadot-debug + IMAGE_NAME: "polkadot-debug" + BINARY: "polkadot,polkadot-execute-worker,polkadot-prepare-worker" needs: - job: build-linux-stable artifacts: true after_script: + - !reference [.build-push-image, after_script] # pass artifacts to the zombienet-tests job # https://docs.gitlab.com/ee/ci/multi_project_pipelines.html#with-variable-inheritance - - echo "PARACHAINS_IMAGE_NAME=${IMAGE_NAME}" > ./artifacts/parachains.env + - echo "PARACHAINS_IMAGE_NAME=${IMAGE}" > ./artifacts/parachains.env - echo "PARACHAINS_IMAGE_TAG=$(cat ./artifacts/EXTRATAG)" >> ./artifacts/parachains.env artifacts: reports: @@ -48,20 +44,15 @@ publish-test-collators-image: - .build-push-image - .zombienet-refs variables: - CI_IMAGE: ${BUILDAH_IMAGE} - GIT_STRATEGY: none - DOCKER_USER: ${PARITYPR_USER} - DOCKER_PASS: ${PARITYPR_PASS} - # scripts/ci/dockerfiles/collator_injected.Dockerfile - DOCKERFILE: ci/dockerfiles/collator_injected.Dockerfile - IMAGE_NAME: docker.io/paritypr/colander + IMAGE_NAME: "colander" + 
BINARY: "adder-collator,undying-collator" needs: - job: build-test-collators artifacts: true after_script: - - buildah logout --all + - !reference [.build-push-image, after_script] # pass artifacts to the zombienet-tests job - - echo "COLLATOR_IMAGE_NAME=${IMAGE_NAME}" > ./artifacts/collator.env + - echo "COLLATOR_IMAGE_NAME=${IMAGE}" > ./artifacts/collator.env - echo "COLLATOR_IMAGE_TAG=$(cat ./artifacts/EXTRATAG)" >> ./artifacts/collator.env artifacts: reports: @@ -76,20 +67,15 @@ publish-malus-image: - .build-push-image - .zombienet-refs variables: - CI_IMAGE: ${BUILDAH_IMAGE} - GIT_STRATEGY: none - DOCKER_USER: ${PARITYPR_USER} - DOCKER_PASS: ${PARITYPR_PASS} - # scripts/ci/dockerfiles/malus_injected.Dockerfile - DOCKERFILE: ci/dockerfiles/malus_injected.Dockerfile - IMAGE_NAME: docker.io/paritypr/malus + IMAGE_NAME: "malus" + BINARY: "malus,polkadot-execute-worker,polkadot-prepare-worker" needs: - job: build-malus artifacts: true after_script: - - buildah logout "$IMAGE_NAME" + - !reference [.build-push-image, after_script] # pass artifacts to the zombienet-tests job - - echo "MALUS_IMAGE_NAME=${IMAGE_NAME}" > ./artifacts/malus.env + - echo "MALUS_IMAGE_NAME=${IMAGE}" > ./artifacts/malus.env - echo "MALUS_IMAGE_TAG=$(cat ./artifacts/EXTRATAG)" >> ./artifacts/malus.env artifacts: reports: @@ -103,13 +89,11 @@ publish-staking-miner-image: - .build-push-image - .publish-refs variables: - CI_IMAGE: ${BUILDAH_IMAGE} - # scripts/ci/dockerfiles/staking-miner/staking-miner_injected.Dockerfile - DOCKERFILE: ci/dockerfiles/staking-miner/staking-miner_injected.Dockerfile - IMAGE_NAME: docker.io/paritytech/staking-miner - GIT_STRATEGY: none - DOCKER_USER: ${Docker_Hub_User_Parity} - DOCKER_PASS: ${Docker_Hub_Pass_Parity} + IMAGE_NAME: "staking-miner" + BINARY: "staking-miner" + DOCKER_OWNER: "paritytech" + DOCKER_USER: "${Docker_Hub_User_Parity}" + DOCKER_PASS: "${Docker_Hub_Pass_Parity}" needs: - job: build-staking-miner artifacts: true @@ -122,11 +106,11 @@ 
publish-polkadot-image-description: DOCKER_PASSWORD: ${Docker_Hub_Pass_Parity} DOCKERHUB_REPOSITORY: parity/polkadot SHORT_DESCRIPTION: "Polkadot Official Docker Image" - README_FILEPATH: $CI_PROJECT_DIR/scripts/ci/dockerfiles/polkadot_Dockerfile.README.md + README_FILEPATH: $CI_PROJECT_DIR/scripts/ci/dockerfiles/polkadot/polkadot_Dockerfile.README.md rules: - if: $CI_COMMIT_REF_NAME == "master" changes: - - scripts/ci/dockerfiles/polkadot_Dockerfile.README.md + - scripts/ci/dockerfiles/polkadot/polkadot_Dockerfile.README.md - if: $CI_PIPELINE_SOURCE == "schedule" when: never script: diff --git a/utils/staking-miner/README.md b/utils/staking-miner/README.md index 4148677ee7ca..b7f70de573b0 100644 --- a/utils/staking-miner/README.md +++ b/utils/staking-miner/README.md @@ -28,8 +28,9 @@ There are 2 options to build a staking-miner Docker image: ### Building the injected image First build the binary as documented [above](#building). -You may then inject the binary into a Docker base image from the root of the Polkadot repository: +You may then inject the binary into a Docker base image: `parity/base-bin` (running the command from the root of the Polkadot repository): ``` +TODO: UPDATE THAT docker build -t staking-miner -f scripts/ci/dockerfiles/staking-miner/staking-miner_injected.Dockerfile target/release ``` @@ -39,6 +40,7 @@ Unlike the injected image that requires a Linux pre-built binary, this option do The trade-off however is that it takes a little longer to build and this option is less ideal for CI tasks. You may build the multi-stage image the root of the Polkadot repository with: ``` +TODO: UPDATE THAT docker build -t staking-miner -f scripts/ci/dockerfiles/staking-miner/staking-miner_builder.Dockerfile . ``` @@ -51,7 +53,7 @@ While it won't prevent a malicious actor to read your `SEED` if they gain access # The following line starts with an extra space on purpose: SEED=0x1234... 
-docker run --rm -it \ +docker run --rm -i \ --name staking-miner \ --read-only \ -e RUST_LOG=info \ From 12fdcba175d4b170a24112697ac4618e3c608567 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Fri, 11 Aug 2023 18:30:58 +0300 Subject: [PATCH 25/35] companion for 14754: cli: move no-beefy flag to sc-cli (#7600) * cli: move no-beefy flag to substrate sc-cli config * bump substrate ref --------- Signed-off-by: Adrian Catangiu --- Cargo.lock | 368 +++++++++--------- cli/src/cli.rs | 5 - cli/src/command.rs | 11 +- node/service/src/lib.rs | 3 +- node/test/service/src/lib.rs | 2 +- .../adder/collator/src/main.rs | 4 +- .../undying/collator/src/main.rs | 4 +- 7 files changed, 195 insertions(+), 202 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8fcd122e701a..9337c0cf7c47 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -617,7 +617,7 @@ dependencies = [ [[package]] name = "binary-merkle-tree" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "hash-db", "log", @@ -2446,7 +2446,7 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "parity-scale-codec", ] @@ -2469,7 +2469,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frame-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ 
"frame-support", "frame-support-procedural", @@ -2494,7 +2494,7 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "Inflector", "array-bytes", @@ -2542,7 +2542,7 @@ dependencies = [ [[package]] name = "frame-election-provider-solution-type" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2553,7 +2553,7 @@ dependencies = [ [[package]] name = "frame-election-provider-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-election-provider-solution-type", "frame-support", @@ -2570,7 +2570,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-support", "frame-system", @@ -2599,7 +2599,7 @@ dependencies = [ [[package]] name = "frame-remote-externalities" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-recursion", "futures", @@ -2620,7 +2620,7 @@ 
dependencies = [ [[package]] name = "frame-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "aquamarine", "bitflags", @@ -2657,7 +2657,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "Inflector", "cfg-expr", @@ -2675,7 +2675,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", @@ -2687,7 +2687,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "proc-macro2", "quote", @@ -2697,7 +2697,7 @@ dependencies = [ [[package]] name = "frame-support-test" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-executive", @@ -2724,7 +2724,7 @@ dependencies = [ [[package]] name = "frame-support-test-pallet" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-support", "frame-system", @@ -2737,7 +2737,7 @@ dependencies = [ [[package]] name = "frame-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "cfg-if", "frame-support", @@ -2756,7 +2756,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -2771,7 +2771,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "parity-scale-codec", "sp-api", @@ -2780,7 +2780,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-support", "parity-scale-codec", @@ -2962,7 +2962,7 @@ dependencies = [ [[package]] name = "generate-bags" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = 
"git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "chrono", "frame-election-provider-support", @@ -4829,7 +4829,7 @@ dependencies = [ [[package]] name = "mmr-gadget" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "futures", "log", @@ -4848,7 +4848,7 @@ dependencies = [ [[package]] name = "mmr-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "anyhow", "jsonrpsee", @@ -5374,7 +5374,7 @@ dependencies = [ [[package]] name = "pallet-assets" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5389,7 +5389,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-support", "frame-system", @@ -5405,7 +5405,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-support", "frame-system", @@ 
-5419,7 +5419,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5443,7 +5443,7 @@ dependencies = [ [[package]] name = "pallet-bags-list" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5463,7 +5463,7 @@ dependencies = [ [[package]] name = "pallet-bags-list-remote-tests" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-election-provider-support", "frame-remote-externalities", @@ -5482,7 +5482,7 @@ dependencies = [ [[package]] name = "pallet-balances" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5497,7 +5497,7 @@ dependencies = [ [[package]] name = "pallet-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-support", "frame-system", @@ -5516,7 +5516,7 @@ dependencies = [ [[package]] name = "pallet-beefy-mmr" 
version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "array-bytes", "binary-merkle-tree", @@ -5540,7 +5540,7 @@ dependencies = [ [[package]] name = "pallet-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5558,7 +5558,7 @@ dependencies = [ [[package]] name = "pallet-child-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5577,7 +5577,7 @@ dependencies = [ [[package]] name = "pallet-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5594,7 +5594,7 @@ dependencies = [ [[package]] name = "pallet-conviction-voting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "assert_matches", "frame-benchmarking", @@ -5611,7 +5611,7 @@ dependencies = [ [[package]] name = "pallet-democracy" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5629,7 +5629,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-multi-phase" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5652,7 +5652,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-support-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5665,7 +5665,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5684,7 +5684,7 @@ dependencies = [ [[package]] name = "pallet-fast-unstake" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "docify", "frame-benchmarking", @@ -5703,7 +5703,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5726,7 +5726,7 @@ dependencies = [ [[package]] name = "pallet-identity" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "enumflags2", "frame-benchmarking", @@ -5742,7 +5742,7 @@ dependencies = [ [[package]] name = "pallet-im-online" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5762,7 +5762,7 @@ dependencies = [ [[package]] name = "pallet-indices" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5779,7 +5779,7 @@ dependencies = [ [[package]] name = "pallet-membership" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5796,7 +5796,7 @@ dependencies = [ [[package]] name = "pallet-message-queue" version = "7.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = 
"git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5815,7 +5815,7 @@ dependencies = [ [[package]] name = "pallet-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5832,7 +5832,7 @@ dependencies = [ [[package]] name = "pallet-multisig" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5848,7 +5848,7 @@ dependencies = [ [[package]] name = "pallet-nis" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5864,7 +5864,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-support", "frame-system", @@ -5883,7 +5883,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-benchmarking" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies 
= [ "frame-benchmarking", "frame-election-provider-support", @@ -5903,7 +5903,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-runtime-api" version = "1.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "pallet-nomination-pools", "parity-scale-codec", @@ -5914,7 +5914,7 @@ dependencies = [ [[package]] name = "pallet-offences" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-support", "frame-system", @@ -5931,7 +5931,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5955,7 +5955,7 @@ dependencies = [ [[package]] name = "pallet-preimage" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5972,7 +5972,7 @@ dependencies = [ [[package]] name = "pallet-proxy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ 
-5987,7 +5987,7 @@ dependencies = [ [[package]] name = "pallet-ranked-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -6005,7 +6005,7 @@ dependencies = [ [[package]] name = "pallet-recovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -6020,7 +6020,7 @@ dependencies = [ [[package]] name = "pallet-referenda" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "assert_matches", "frame-benchmarking", @@ -6039,7 +6039,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -6056,7 +6056,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-support", "frame-system", @@ -6077,7 +6077,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" version = "4.0.0-dev" -source 
= "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -6093,7 +6093,7 @@ dependencies = [ [[package]] name = "pallet-society" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -6111,7 +6111,7 @@ dependencies = [ [[package]] name = "pallet-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -6134,7 +6134,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -6145,7 +6145,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-fn" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "log", "sp-arithmetic", @@ -6154,7 +6154,7 @@ dependencies = [ [[package]] name = "pallet-staking-runtime-api" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "parity-scale-codec", "sp-api", @@ -6163,7 +6163,7 @@ dependencies = [ [[package]] name = "pallet-state-trie-migration" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -6180,7 +6180,7 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -6195,7 +6195,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -6213,7 +6213,7 @@ dependencies = [ [[package]] name = "pallet-tips" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -6232,7 +6232,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = 
"git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-support", "frame-system", @@ -6248,7 +6248,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", @@ -6264,7 +6264,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -6276,7 +6276,7 @@ dependencies = [ [[package]] name = "pallet-treasury" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -6293,7 +6293,7 @@ dependencies = [ [[package]] name = "pallet-uniques" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -6308,7 +6308,7 @@ dependencies = [ [[package]] name = "pallet-utility" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = 
"git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -6324,7 +6324,7 @@ dependencies = [ [[package]] name = "pallet-vesting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -6339,7 +6339,7 @@ dependencies = [ [[package]] name = "pallet-whitelist" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -9364,7 +9364,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "log", "sp-core", @@ -9375,7 +9375,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "futures", @@ -9403,7 +9403,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "futures", 
"futures-timer", @@ -9426,7 +9426,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -9441,7 +9441,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "memmap2", "sc-chain-spec-derive", @@ -9460,7 +9460,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -9471,7 +9471,7 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "array-bytes", "chrono", @@ -9510,7 +9510,7 @@ dependencies = [ [[package]] name = "sc-client-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "fnv", "futures", @@ -9536,7 +9536,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "hash-db", "kvdb", @@ -9562,7 +9562,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "futures", @@ -9587,7 +9587,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "fork-tree", @@ -9623,7 +9623,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "futures", "jsonrpsee", @@ -9645,7 +9645,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "array-bytes", "async-channel", @@ -9679,7 +9679,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = 
"git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "futures", "jsonrpsee", @@ -9698,7 +9698,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "fork-tree", "parity-scale-codec", @@ -9711,7 +9711,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "ahash 0.8.2", "array-bytes", @@ -9752,7 +9752,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "finality-grandpa", "futures", @@ -9772,7 +9772,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "futures", @@ -9795,7 +9795,7 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "parity-scale-codec", 
"parking_lot 0.12.1", @@ -9817,7 +9817,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "sc-allocator", "sp-maybe-compressed-blob", @@ -9829,7 +9829,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "anyhow", "cfg-if", @@ -9846,7 +9846,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "ansi_term", "futures", @@ -9862,7 +9862,7 @@ dependencies = [ [[package]] name = "sc-keystore" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "array-bytes", "parking_lot 0.12.1", @@ -9876,7 +9876,7 @@ dependencies = [ [[package]] name = "sc-network" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "array-bytes", "async-channel", @@ -9919,7 +9919,7 @@ dependencies = [ [[package]] name = "sc-network-bitswap" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-channel", "cid", @@ -9939,7 +9939,7 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "bitflags", @@ -9956,7 +9956,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "ahash 0.8.2", "futures", @@ -9975,7 +9975,7 @@ dependencies = [ [[package]] name = "sc-network-light" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "array-bytes", "async-channel", @@ -9996,7 +9996,7 @@ dependencies = [ [[package]] name = "sc-network-sync" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "array-bytes", "async-channel", @@ -10030,7 +10030,7 @@ dependencies = [ [[package]] name = "sc-network-transactions" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = 
"git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "array-bytes", "futures", @@ -10048,7 +10048,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "array-bytes", "bytes", @@ -10082,7 +10082,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -10091,7 +10091,7 @@ dependencies = [ [[package]] name = "sc-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "futures", "jsonrpsee", @@ -10122,7 +10122,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -10141,7 +10141,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "http", "jsonrpsee", @@ -10156,7 +10156,7 @@ 
dependencies = [ [[package]] name = "sc-rpc-spec-v2" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "array-bytes", "futures", @@ -10183,7 +10183,7 @@ dependencies = [ [[package]] name = "sc-service" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "directories", @@ -10247,7 +10247,7 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "log", "parity-scale-codec", @@ -10258,7 +10258,7 @@ dependencies = [ [[package]] name = "sc-storage-monitor" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "clap 4.2.5", "fs4", @@ -10272,7 +10272,7 @@ dependencies = [ [[package]] name = "sc-sync-state-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -10291,7 +10291,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "6.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "futures", "libc", @@ -10310,7 +10310,7 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "chrono", "futures", @@ -10329,7 +10329,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "ansi_term", "atty", @@ -10358,7 +10358,7 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -10369,7 +10369,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "futures", @@ -10395,7 +10395,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = 
"git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "futures", @@ -10411,7 +10411,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-channel", "futures", @@ -10959,7 +10959,7 @@ dependencies = [ [[package]] name = "sp-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "hash-db", "log", @@ -10980,7 +10980,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "Inflector", "blake2", @@ -10994,7 +10994,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" version = "23.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "parity-scale-codec", "scale-info", @@ -11007,7 +11007,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "16.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "integer-sqrt", "num-traits", @@ -11021,7 +11021,7 @@ dependencies = [ 
[[package]] name = "sp-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "parity-scale-codec", "scale-info", @@ -11034,7 +11034,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "sp-api", "sp-inherents", @@ -11045,7 +11045,7 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "futures", "log", @@ -11063,7 +11063,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "futures", @@ -11078,7 +11078,7 @@ dependencies = [ [[package]] name = "sp-consensus-aura" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "parity-scale-codec", @@ -11095,7 +11095,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "parity-scale-codec", @@ -11114,7 +11114,7 @@ dependencies = [ [[package]] name = "sp-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "lazy_static", "parity-scale-codec", @@ -11133,7 +11133,7 @@ dependencies = [ [[package]] name = "sp-consensus-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "finality-grandpa", "log", @@ -11151,7 +11151,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "parity-scale-codec", "scale-info", @@ -11163,7 +11163,7 @@ dependencies = [ [[package]] name = "sp-core" version = "21.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "array-bytes", "arrayvec 0.7.4", @@ -11210,7 +11210,7 @@ dependencies = [ [[package]] name = "sp-core-hashing" version = "9.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = 
"git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "blake2b_simd", "byteorder", @@ -11223,7 +11223,7 @@ dependencies = [ [[package]] name = "sp-core-hashing-proc-macro" version = "9.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "quote", "sp-core-hashing", @@ -11233,7 +11233,7 @@ dependencies = [ [[package]] name = "sp-database" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "kvdb", "parking_lot 0.12.1", @@ -11242,7 +11242,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "proc-macro2", "quote", @@ -11252,7 +11252,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.19.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "environmental", "parity-scale-codec", @@ -11263,7 +11263,7 @@ dependencies = [ [[package]] name = "sp-genesis-builder" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "serde_json", "sp-api", @@ -11274,7 
+11274,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -11288,7 +11288,7 @@ dependencies = [ [[package]] name = "sp-io" version = "23.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "bytes", "ed25519", @@ -11313,7 +11313,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "24.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "lazy_static", "sp-core", @@ -11324,7 +11324,7 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.27.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", @@ -11336,7 +11336,7 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "thiserror", "zstd 0.12.3+zstd.1.5.2", @@ -11345,7 +11345,7 @@ dependencies = [ [[package]] name = "sp-metadata-ir" version = "0.1.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-metadata", "parity-scale-codec", @@ -11356,7 +11356,7 @@ dependencies = [ [[package]] name = "sp-mmr-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "ckb-merkle-mountain-range", "log", @@ -11374,7 +11374,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "parity-scale-codec", "scale-info", @@ -11388,7 +11388,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "sp-api", "sp-core", @@ -11398,7 +11398,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "backtrace", "lazy_static", @@ -11408,7 +11408,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = 
"git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "rustc-hash", "serde", @@ -11418,7 +11418,7 @@ dependencies = [ [[package]] name = "sp-runtime" version = "24.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "either", "hash256-std-hasher", @@ -11440,7 +11440,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "17.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -11458,7 +11458,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "11.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "Inflector", "proc-macro-crate", @@ -11470,7 +11470,7 @@ dependencies = [ [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "parity-scale-codec", "scale-info", @@ -11485,7 +11485,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "impl-trait-for-tuples", 
"parity-scale-codec", @@ -11499,7 +11499,7 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.28.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "hash-db", "log", @@ -11520,7 +11520,7 @@ dependencies = [ [[package]] name = "sp-statement-store" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "aes-gcm 0.10.2", "curve25519-dalek 3.2.0", @@ -11544,12 +11544,12 @@ dependencies = [ [[package]] name = "sp-std" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" [[package]] name = "sp-storage" version = "13.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "impl-serde", "parity-scale-codec", @@ -11562,7 +11562,7 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "parity-scale-codec", @@ -11575,7 +11575,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "10.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = 
"git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "parity-scale-codec", "sp-std", @@ -11587,7 +11587,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "sp-api", "sp-runtime", @@ -11596,7 +11596,7 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "parity-scale-codec", @@ -11611,7 +11611,7 @@ dependencies = [ [[package]] name = "sp-trie" version = "22.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "ahash 0.8.2", "hash-db", @@ -11634,7 +11634,7 @@ dependencies = [ [[package]] name = "sp-version" version = "22.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "impl-serde", "parity-scale-codec", @@ -11651,7 +11651,7 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "parity-scale-codec", 
"proc-macro2", @@ -11662,7 +11662,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "14.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -11675,7 +11675,7 @@ dependencies = [ [[package]] name = "sp-weights" version = "20.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "parity-scale-codec", "scale-info", @@ -11900,12 +11900,12 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" [[package]] name = "substrate-frame-rpc-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-system-rpc-runtime-api", "futures", @@ -11924,7 +11924,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "hyper", "log", @@ -11936,7 +11936,7 @@ dependencies = [ [[package]] name = "substrate-rpc-client" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "jsonrpsee", @@ -11949,7 +11949,7 @@ dependencies = [ [[package]] name = "substrate-state-trie-migration-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -11966,7 +11966,7 @@ dependencies = [ [[package]] name = "substrate-test-client" version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "array-bytes", "async-trait", @@ -11992,7 +11992,7 @@ dependencies = [ [[package]] name = "substrate-test-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "futures", "substrate-test-utils-derive", @@ -12002,7 +12002,7 @@ dependencies = [ [[package]] name = "substrate-test-utils-derive" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -12013,7 +12013,7 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" version = "5.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "ansi_term", "build-helper", @@ -12890,7 +12890,7 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "try-runtime-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "clap 4.2.5", diff --git a/cli/src/cli.rs b/cli/src/cli.rs index b7d884750762..e78213cf11c8 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -99,11 +99,6 @@ pub struct RunCmd { #[arg(long = "grandpa-pause", num_args = 2)] pub grandpa_pause: Vec, - /// Disable the BEEFY gadget - /// (currently enabled by default on Rococo, Wococo and Versi). - #[arg(long)] - pub no_beefy: bool, - /// Add the destination address to the jaeger agent. /// /// Must be valid socket address, of format `IP:Port` diff --git a/cli/src/command.rs b/cli/src/command.rs index ee71bb0840dc..c8e8673c6d70 100644 --- a/cli/src/command.rs +++ b/cli/src/command.rs @@ -235,15 +235,11 @@ fn run_node_inner( where F: FnOnce(&mut sc_cli::LoggerBuilder, &sc_service::Configuration), { - let runner = cli + let mut runner = cli .create_runner_with_logger_hook::(&cli.run.base, logger_hook) .map_err(Error::from)?; let chain_spec = &runner.config().chain_spec; - // By default, enable BEEFY on test networks. 
- let enable_beefy = (chain_spec.is_rococo() || chain_spec.is_wococo() || chain_spec.is_versi()) && - !cli.run.no_beefy; - set_default_ss58_version(chain_spec); let grandpa_pause = if cli.run.grandpa_pause.is_empty() { @@ -259,6 +255,10 @@ where info!(" KUSAMA FOUNDATION "); info!("----------------------------"); } + // BEEFY allowed only on test networks. + if !(chain_spec.is_rococo() || chain_spec.is_wococo() || chain_spec.is_versi()) { + runner.config_mut().disable_beefy = true; + } let jaeger_agent = if let Some(ref jaeger_agent) = cli.run.jaeger_agent { Some( @@ -289,7 +289,6 @@ where service::NewFullParams { is_collator: service::IsCollator::No, grandpa_pause, - enable_beefy, jaeger_agent, telemetry_worker_handle: None, node_version, diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index 457b5488ea14..fa8cb8ec77f7 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -629,7 +629,6 @@ where pub struct NewFullParams { pub is_collator: IsCollator, pub grandpa_pause: Option<(u32, u32)>, - pub enable_beefy: bool, pub jaeger_agent: Option, pub telemetry_worker_handle: Option, /// The version of the node. TESTING ONLY: `None` can be passed to skip the node/worker version @@ -711,7 +710,6 @@ pub fn new_full( NewFullParams { is_collator, grandpa_pause, - enable_beefy, jaeger_agent, telemetry_worker_handle, node_version, @@ -746,6 +744,7 @@ pub fn new_full( Some(backoff) }; + let enable_beefy = !config.disable_beefy; // If not on a known test network, warn the user that BEEFY is still experimental. 
if enable_beefy && !config.chain_spec.is_rococo() && diff --git a/node/test/service/src/lib.rs b/node/test/service/src/lib.rs index 99ccacb78f7e..a2c1b1941003 100644 --- a/node/test/service/src/lib.rs +++ b/node/test/service/src/lib.rs @@ -81,7 +81,6 @@ pub fn new_full( polkadot_service::NewFullParams { is_collator, grandpa_pause: None, - enable_beefy: true, jaeger_agent: None, telemetry_worker_handle: None, node_version: None, @@ -188,6 +187,7 @@ pub fn node_config( offchain_worker: Default::default(), force_authoring: false, disable_grandpa: false, + disable_beefy: false, dev_key_seed: Some(key_seed), tracing_targets: None, tracing_receiver: Default::default(), diff --git a/parachain/test-parachains/adder/collator/src/main.rs b/parachain/test-parachains/adder/collator/src/main.rs index d4bfc50c8db7..8d8a13767178 100644 --- a/parachain/test-parachains/adder/collator/src/main.rs +++ b/parachain/test-parachains/adder/collator/src/main.rs @@ -53,15 +53,15 @@ fn main() -> Result<()> { ) })?; - runner.run_node_until_exit(|config| async move { + runner.run_node_until_exit(|mut config| async move { let collator = Collator::new(); + config.disable_beefy = true; let full_node = polkadot_service::build_full( config, polkadot_service::NewFullParams { is_collator: polkadot_service::IsCollator::Yes(collator.collator_key()), grandpa_pause: None, - enable_beefy: false, jaeger_agent: None, telemetry_worker_handle: None, diff --git a/parachain/test-parachains/undying/collator/src/main.rs b/parachain/test-parachains/undying/collator/src/main.rs index 3b6b4259aaec..da8205ba1893 100644 --- a/parachain/test-parachains/undying/collator/src/main.rs +++ b/parachain/test-parachains/undying/collator/src/main.rs @@ -53,15 +53,15 @@ fn main() -> Result<()> { ) })?; - runner.run_node_until_exit(|config| async move { + runner.run_node_until_exit(|mut config| async move { let collator = Collator::new(cli.run.pov_size, cli.run.pvf_complexity); + config.disable_beefy = true; let full_node = 
polkadot_service::build_full( config, polkadot_service::NewFullParams { is_collator: polkadot_service::IsCollator::Yes(collator.collator_key()), grandpa_pause: None, - enable_beefy: false, jaeger_agent: None, telemetry_worker_handle: None, From 0f57383be6c08c28f30dd02f28ac01bf1cc900d4 Mon Sep 17 00:00:00 2001 From: jserrat <35823283+Jpserrat@users.noreply.github.com> Date: Mon, 14 Aug 2023 06:48:15 -0300 Subject: [PATCH 26/35] pvf: use test-utils feature to export test only (#7538) * pvf: use test-utils feature to export test only * adding comment to test-utils feature * make prepare-worker and execute-worker as optional dependencies and add comments to test-utils * remove doc hidden from pvf testing * add prepare worker and execute worker entrypoints to test-utils feature * pvf: add sp_tracing as optional dependency of test-utils * add test-utils for polkadot and malus * add test-utils feature to prepare and execute workers script * remove required features from prepare and executing * Try to trigger CI again to fix broken jobs --------- Co-authored-by: Marcin S --- Cargo.lock | 3 +++ Cargo.toml | 2 +- node/core/pvf/Cargo.toml | 13 ++++++++++--- node/core/pvf/common/Cargo.toml | 7 ++++++- node/core/pvf/common/src/lib.rs | 4 ++-- node/core/pvf/common/src/pvf.rs | 6 +++--- node/core/pvf/src/lib.rs | 6 ++++-- node/core/pvf/src/testing.rs | 1 - node/core/pvf/tests/it/worker_common.rs | 6 ++++-- node/malus/Cargo.toml | 2 +- parachain/test-parachains/adder/collator/Cargo.toml | 11 +++++++++-- .../test-parachains/undying/collator/Cargo.toml | 11 +++++++++-- 12 files changed, 52 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9337c0cf7c47..624483993f68 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7239,6 +7239,7 @@ dependencies = [ "parity-scale-codec", "pin-project", "polkadot-core-primitives", + "polkadot-node-core-pvf", "polkadot-node-core-pvf-common", "polkadot-node-core-pvf-execute-worker", "polkadot-node-core-pvf-prepare-worker", @@ -12257,6 
+12258,7 @@ dependencies = [ "sp-keyring", "substrate-test-utils", "test-parachain-adder", + "test-parachain-adder-collator", "tokio", ] @@ -12305,6 +12307,7 @@ dependencies = [ "sp-keyring", "substrate-test-utils", "test-parachain-undying", + "test-parachain-undying-collator", "tokio", ] diff --git a/Cargo.toml b/Cargo.toml index 05ec768a9771..136bda4bffcd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -40,7 +40,7 @@ polkadot-overseer = { path = "node/overseer" } tracking-allocator = { path = "node/tracking-allocator", optional = true } # Needed for worker binaries. -polkadot-node-core-pvf-common = { path = "node/core/pvf/common" } +polkadot-node-core-pvf-common = { path = "node/core/pvf/common", features = ["test-utils"] } polkadot-node-core-pvf-execute-worker = { path = "node/core/pvf/execute-worker" } [dev-dependencies] diff --git a/node/core/pvf/Cargo.toml b/node/core/pvf/Cargo.toml index 02a56ed9d2df..b55df45b0203 100644 --- a/node/core/pvf/Cargo.toml +++ b/node/core/pvf/Cargo.toml @@ -9,6 +9,7 @@ license.workspace = true [[bin]] name = "puppet_worker" path = "bin/puppet_worker.rs" +required-features = ["test-utils"] [dependencies] always-assert = "0.1" @@ -27,8 +28,6 @@ parity-scale-codec = { version = "3.6.1", default-features = false, features = [ polkadot-parachain = { path = "../../../parachain" } polkadot-core-primitives = { path = "../../../core-primitives" } polkadot-node-core-pvf-common = { path = "common" } -polkadot-node-core-pvf-execute-worker = { path = "execute-worker" } -polkadot-node-core-pvf-prepare-worker = { path = "prepare-worker" } polkadot-node-metrics = { path = "../../metrics" } polkadot-node-primitives = { path = "../../primitives" } polkadot-primitives = { path = "../../../primitives" } @@ -36,7 +35,9 @@ polkadot-primitives = { path = "../../../primitives" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-wasm-interface = { git = "https://github.com/paritytech/substrate", branch = "master" } 
sp-maybe-compressed-blob = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-tracing = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-tracing = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true } +polkadot-node-core-pvf-prepare-worker = { path = "prepare-worker", optional = true } +polkadot-node-core-pvf-execute-worker = { path = "execute-worker", optional = true } [build-dependencies] substrate-build-script-utils = { git = "https://github.com/paritytech/substrate", branch = "master" } @@ -44,9 +45,15 @@ substrate-build-script-utils = { git = "https://github.com/paritytech/substrate" [dev-dependencies] assert_matches = "1.4.0" hex-literal = "0.3.4" +polkadot-node-core-pvf-common = { path = "common", features = ["test-utils"] } +# For the puppet worker, depend on ourselves with the test-utils feature. +polkadot-node-core-pvf = { path = ".", features = ["test-utils"] } adder = { package = "test-parachain-adder", path = "../../../parachain/test-parachains/adder" } halt = { package = "test-parachain-halt", path = "../../../parachain/test-parachains/halt" } [features] ci-only-tests = [] +# This feature is used to export test code to other crates without putting it in the production build. +# This is also used by the `puppet_worker` binary. 
+test-utils = ["polkadot-node-core-pvf-prepare-worker", "polkadot-node-core-pvf-execute-worker", "sp-tracing"] diff --git a/node/core/pvf/common/Cargo.toml b/node/core/pvf/common/Cargo.toml index a091f8f75806..dfb490455b3d 100644 --- a/node/core/pvf/common/Cargo.toml +++ b/node/core/pvf/common/Cargo.toml @@ -25,7 +25,7 @@ sc-executor-wasmtime = { git = "https://github.com/paritytech/substrate", branch sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-externalities = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-tracing = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-tracing = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true } [target.'cfg(target_os = "linux")'.dependencies] landlock = "0.2.0" @@ -33,3 +33,8 @@ landlock = "0.2.0" [dev-dependencies] assert_matches = "1.4.0" tempfile = "3.3.0" + +[features] +# This feature is used to export test code to other crates without putting it in the production build. +# Also used for building the puppet worker. +test-utils = ["sp-tracing"] diff --git a/node/core/pvf/common/src/lib.rs b/node/core/pvf/common/src/lib.rs index 7e0cab45b671..8ff9757a07a0 100644 --- a/node/core/pvf/common/src/lib.rs +++ b/node/core/pvf/common/src/lib.rs @@ -26,7 +26,7 @@ pub mod worker; pub use cpu_time::ProcessTime; // Used by `decl_worker_main!`. 
-#[doc(hidden)] +#[cfg(feature = "test-utils")] pub use sp_tracing; const LOG_TARGET: &str = "parachain::pvf-common"; @@ -34,7 +34,7 @@ const LOG_TARGET: &str = "parachain::pvf-common"; use std::mem; use tokio::io::{self, AsyncRead, AsyncReadExt as _, AsyncWrite, AsyncWriteExt as _}; -#[doc(hidden)] +#[cfg(feature = "test-utils")] pub mod tests { use std::time::Duration; diff --git a/node/core/pvf/common/src/pvf.rs b/node/core/pvf/common/src/pvf.rs index ab0007352d1d..e31264713a57 100644 --- a/node/core/pvf/common/src/pvf.rs +++ b/node/core/pvf/common/src/pvf.rs @@ -84,7 +84,7 @@ impl PvfPrepData { } /// Creates a structure for tests. - #[doc(hidden)] + #[cfg(feature = "test-utils")] pub fn from_discriminator_and_timeout(num: u32, timeout: Duration) -> Self { let descriminator_buf = num.to_le_bytes().to_vec(); Self::from_code( @@ -96,13 +96,13 @@ impl PvfPrepData { } /// Creates a structure for tests. - #[doc(hidden)] + #[cfg(feature = "test-utils")] pub fn from_discriminator(num: u32) -> Self { Self::from_discriminator_and_timeout(num, crate::tests::TEST_PREPARATION_TIMEOUT) } /// Creates a structure for tests. - #[doc(hidden)] + #[cfg(feature = "test-utils")] pub fn from_discriminator_precheck(num: u32) -> Self { let mut pvf = Self::from_discriminator_and_timeout(num, crate::tests::TEST_PREPARATION_TIMEOUT); diff --git a/node/core/pvf/src/lib.rs b/node/core/pvf/src/lib.rs index 2ed3f5242ded..eb6ab39ac500 100644 --- a/node/core/pvf/src/lib.rs +++ b/node/core/pvf/src/lib.rs @@ -97,11 +97,11 @@ mod prepare; mod priority; mod worker_intf; -#[doc(hidden)] +#[cfg(feature = "test-utils")] pub mod testing; // Used by `decl_puppet_worker_main!`. -#[doc(hidden)] +#[cfg(feature = "test-utils")] pub use sp_tracing; pub use error::{InvalidCandidate, ValidationError}; @@ -118,7 +118,9 @@ pub use polkadot_node_core_pvf_common::{ }; // Re-export worker entrypoints. 
+#[cfg(feature = "test-utils")] pub use polkadot_node_core_pvf_execute_worker::worker_entrypoint as execute_worker_entrypoint; +#[cfg(feature = "test-utils")] pub use polkadot_node_core_pvf_prepare_worker::worker_entrypoint as prepare_worker_entrypoint; /// The log target for this crate. diff --git a/node/core/pvf/src/testing.rs b/node/core/pvf/src/testing.rs index 3cd1ce304ab8..980a28c01566 100644 --- a/node/core/pvf/src/testing.rs +++ b/node/core/pvf/src/testing.rs @@ -19,7 +19,6 @@ //! N.B. This is not guarded with some feature flag. Overexposing items here may affect the final //! artifact even for production builds. -#[doc(hidden)] pub use crate::worker_intf::{spawn_with_program_path, SpawnErr}; use polkadot_primitives::ExecutorParams; diff --git a/node/core/pvf/tests/it/worker_common.rs b/node/core/pvf/tests/it/worker_common.rs index 439ac8538c95..a3bf552e894a 100644 --- a/node/core/pvf/tests/it/worker_common.rs +++ b/node/core/pvf/tests/it/worker_common.rs @@ -14,10 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use crate::PUPPET_EXE; -use polkadot_node_core_pvf::testing::{spawn_with_program_path, SpawnErr}; use std::time::Duration; +use polkadot_node_core_pvf::testing::{spawn_with_program_path, SpawnErr}; + +use crate::PUPPET_EXE; + // Test spawning a program that immediately exits with a failure code. #[tokio::test] async fn spawn_immediate_exit() { diff --git a/node/malus/Cargo.toml b/node/malus/Cargo.toml index 08656ea9f3da..0c9988159516 100644 --- a/node/malus/Cargo.toml +++ b/node/malus/Cargo.toml @@ -48,7 +48,7 @@ erasure = { package = "polkadot-erasure-coding", path = "../../erasure-coding" } rand = "0.8.5" # Required for worker binaries to build. 
-polkadot-node-core-pvf-common = { path = "../core/pvf/common" } +polkadot-node-core-pvf-common = { path = "../core/pvf/common", features = ["test-utils"] } polkadot-node-core-pvf-execute-worker = { path = "../core/pvf/execute-worker" } polkadot-node-core-pvf-prepare-worker = { path = "../core/pvf/prepare-worker" } diff --git a/parachain/test-parachains/adder/collator/Cargo.toml b/parachain/test-parachains/adder/collator/Cargo.toml index fec95a5718a1..08dcbcaa644e 100644 --- a/parachain/test-parachains/adder/collator/Cargo.toml +++ b/parachain/test-parachains/adder/collator/Cargo.toml @@ -13,6 +13,7 @@ path = "src/main.rs" [[bin]] name = "adder_collator_puppet_worker" path = "bin/puppet_worker.rs" +required-features = ["test-utils"] [dependencies] parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } @@ -31,11 +32,10 @@ polkadot-node-subsystem = { path = "../../../../node/subsystem" } sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" } - # This one is tricky. Even though it is not used directly by the collator, we still need it for the # `puppet_worker` binary, which is required for the integration test. However, this shouldn't be # a big problem since it is used transitively anyway. -polkadot-node-core-pvf = { path = "../../../../node/core/pvf" } +polkadot-node-core-pvf = { path = "../../../../node/core/pvf", features = ["test-utils"], optional = true } [dev-dependencies] polkadot-parachain = { path = "../../.." 
} @@ -44,5 +44,12 @@ polkadot-test-service = { path = "../../../../node/test/service" } substrate-test-utils = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } +# For the puppet worker, depend on ourselves with the test-utils feature. +test-parachain-adder-collator = { path = ".", features = ["test-utils"] } tokio = { version = "1.24.2", features = ["macros"] } + +[features] +# This feature is used to export test code to other crates without putting it in the production build. +# This is also used by the `puppet_worker` binary. +test-utils = ["polkadot-node-core-pvf/test-utils"] diff --git a/parachain/test-parachains/undying/collator/Cargo.toml b/parachain/test-parachains/undying/collator/Cargo.toml index 4f1a34f977c8..5b5656efb4ac 100644 --- a/parachain/test-parachains/undying/collator/Cargo.toml +++ b/parachain/test-parachains/undying/collator/Cargo.toml @@ -13,6 +13,7 @@ path = "src/main.rs" [[bin]] name = "undying_collator_puppet_worker" path = "bin/puppet_worker.rs" +required-features = ["test-utils"] [dependencies] parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } @@ -31,18 +32,24 @@ polkadot-node-subsystem = { path = "../../../../node/subsystem" } sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" } - # This one is tricky. Even though it is not used directly by the collator, we still need it for the # `puppet_worker` binary, which is required for the integration test. However, this shouldn't be # a big problem since it is used transitively anyway. 
-polkadot-node-core-pvf = { path = "../../../../node/core/pvf" } +polkadot-node-core-pvf = { path = "../../../../node/core/pvf", features = ["test-utils"], optional = true } [dev-dependencies] polkadot-parachain = { path = "../../.." } polkadot-test-service = { path = "../../../../node/test/service" } +# For the puppet worker, depend on ourselves with the test-utils feature. +test-parachain-undying-collator = { path = ".", features = ["test-utils"] } substrate-test-utils = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } tokio = { version = "1.24.2", features = ["macros"] } + +[features] +# This feature is used to export test code to other crates without putting it in the production build. +# This is also used by the `puppet_worker` binary. +test-utils = ["polkadot-node-core-pvf/test-utils"] From 2dda5906d0d390cb5dd3549938210861e0dc9b3e Mon Sep 17 00:00:00 2001 From: Chevdor Date: Mon, 14 Aug 2023 12:00:24 +0200 Subject: [PATCH 27/35] RC container image fixes (#7607) * Remove ENV for the artifacts folder --- .github/workflows/release-40_publish-rc-image.yml | 12 ++++++------ scripts/ci/common/lib.sh | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/release-40_publish-rc-image.yml b/.github/workflows/release-40_publish-rc-image.yml index a821eaa033fd..c46bf534b060 100644 --- a/.github/workflows/release-40_publish-rc-image.yml +++ b/.github/workflows/release-40_publish-rc-image.yml @@ -31,7 +31,6 @@ env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} DOCKER_OWNER: ${{ inputs.owner || github.repository_owner }} REPO: ${{ github.repository }} - ARTIFACT_FOLDER: release-artifacts jobs: fetch-artifacts: @@ -51,7 +50,7 @@ jobs: with: key: artifacts-${{ github.sha }} path: | - ${ARTIFACT_FOLDER}/**/* + ./release-artifacts/**/* build-container: 
runs-on: ubuntu-latest @@ -69,11 +68,12 @@ jobs: uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 with: key: artifacts-${{ github.sha }} + fail-on-cache-miss: true path: | - ${ARTIFACT_FOLDER}/**/* + ./release-artifacts/**/* - name: Check sha256 ${{ matrix.binary }} - working-directory: ${ARTIFACT_FOLDER} + working-directory: ./release-artifacts run: | . ../scripts/ci/common/lib.sh @@ -81,7 +81,7 @@ jobs: check_sha256 ${{ matrix.binary }} && echo "OK" || echo "ERR" - name: Check GPG ${{ matrix.binary }} - working-directory: ${ARTIFACT_FOLDER} + working-directory: ./release-artifacts run: | . ../scripts/ci/common/lib.sh import_gpg_keys @@ -102,7 +102,7 @@ jobs: - name: Build Injected Container image for ${{ matrix.binary }} env: - BIN_FOLDER: ${ARTIFACT_FOLDER} + BIN_FOLDER: ./release-artifacts BINARY: ${{ matrix.binary }} TAGS: ${{join(steps.fetch_refs.outputs.*, ',')}} run: | diff --git a/scripts/ci/common/lib.sh b/scripts/ci/common/lib.sh index 00abe9a1d8d4..a04dc2ef1da0 100755 --- a/scripts/ci/common/lib.sh +++ b/scripts/ci/common/lib.sh @@ -201,7 +201,6 @@ check_bootnode(){ fetch_release_artifacts() { echo "Release ID : $RELEASE_ID" echo "Repo : $REPO" - echo "ARTIFACT_FOLDER: $ARTIFACT_FOLDER" curl -L -s \ -H "Accept: application/vnd.github+json" \ @@ -214,8 +213,8 @@ fetch_release_artifacts() { count=$(jq '.assets|length' < release.json ) # Fetch artifacts - mkdir -p ${ARTIFACT_FOLDER} - pushd ${ARTIFACT_FOLDER} > /dev/null + mkdir -p "./release-artifacts" + pushd "./release-artifacts" > /dev/null iter=1 for id in "${ids[@]}" @@ -227,6 +226,7 @@ fetch_release_artifacts() { iter=$((iter + 1)) done + pwd ls -al --color popd > /dev/null } From 730a1c8a559bff2f2d2c6d7a6d0edcf508f1c0aa Mon Sep 17 00:00:00 2001 From: Chevdor Date: Mon, 14 Aug 2023 13:11:11 +0200 Subject: [PATCH 28/35] Fix the user used to login to Docker hub (#7610) --- .github/workflows/release-40_publish-rc-image.yml | 2 +- scripts/ci/common/lib.sh | 2 +- 2 files changed, 2 
insertions(+), 2 deletions(-) diff --git a/.github/workflows/release-40_publish-rc-image.yml b/.github/workflows/release-40_publish-rc-image.yml index c46bf534b060..3d91c5b8c682 100644 --- a/.github/workflows/release-40_publish-rc-image.yml +++ b/.github/workflows/release-40_publish-rc-image.yml @@ -112,7 +112,7 @@ jobs: - name: Login to Dockerhub uses: docker/login-action@v2 with: - username: ${{ inputs.owner }} + username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Push Container image for ${{ matrix.binary }} diff --git a/scripts/ci/common/lib.sh b/scripts/ci/common/lib.sh index a04dc2ef1da0..e490ec22d5bf 100755 --- a/scripts/ci/common/lib.sh +++ b/scripts/ci/common/lib.sh @@ -206,7 +206,7 @@ fetch_release_artifacts() { -H "Accept: application/vnd.github+json" \ -H "Authorization: Bearer ${GITHUB_TOKEN}" \ -H "X-GitHub-Api-Version: 2022-11-28" \ - https://api.github.com/repos/${REPO}/releases/$RELEASE_ID > release.json + https://api.github.com/repos/${REPO}/releases/${RELEASE_ID} > release.json # Get Asset ids ids=($(jq -r '.assets[].id' < release.json )) From 04ae5328a91675099c65cdcc94f074e2820b1832 Mon Sep 17 00:00:00 2001 From: Aaro Altonen <48052676+altonen@users.noreply.github.com> Date: Mon, 14 Aug 2023 16:52:52 +0300 Subject: [PATCH 29/35] Remove ParityDb migration tests (#7612) --- node/service/src/parachains_db/upgrade.rs | 119 ---------------------- 1 file changed, 119 deletions(-) diff --git a/node/service/src/parachains_db/upgrade.rs b/node/service/src/parachains_db/upgrade.rs index 6041a093ef9b..54ef97afd71c 100644 --- a/node/service/src/parachains_db/upgrade.rs +++ b/node/service/src/parachains_db/upgrade.rs @@ -278,17 +278,6 @@ pub(crate) fn paritydb_version_3_config(path: &Path) -> parity_db::Options { options } -/// Database configuration for version 0. This is useful just for testing. 
-#[cfg(test)] -pub(crate) fn paritydb_version_0_config(path: &Path) -> parity_db::Options { - let mut options = - parity_db::Options::with_columns(&path, super::columns::v1::NUM_COLUMNS as u8); - options.columns[super::columns::v3::COL_AVAILABILITY_META as usize].btree_index = true; - options.columns[super::columns::v3::COL_CHAIN_SELECTION_DATA as usize].btree_index = true; - - options -} - /// Migration from version 0 to version 1. /// Cases covered: /// - upgrading from v0.9.23 or earlier -> the `dispute coordinator column` was changed @@ -332,82 +321,6 @@ mod tests { *, }; - #[test] - fn test_paritydb_migrate_0_to_1() { - use parity_db::Db; - - let db_dir = tempfile::tempdir().unwrap(); - let path = db_dir.path(); - { - let db = Db::open_or_create(&paritydb_version_0_config(&path)).unwrap(); - - db.commit(vec![ - (COL_DISPUTE_COORDINATOR_DATA as u8, b"1234".to_vec(), Some(b"somevalue".to_vec())), - (COL_AVAILABILITY_META as u8, b"5678".to_vec(), Some(b"somevalue".to_vec())), - ]) - .unwrap(); - } - - try_upgrade_db(&path, DatabaseKind::ParityDB).unwrap(); - - let db = Db::open(&paritydb_version_1_config(&path)).unwrap(); - assert_eq!(db.get(COL_DISPUTE_COORDINATOR_DATA as u8, b"1234").unwrap(), None); - assert_eq!( - db.get(COL_AVAILABILITY_META as u8, b"5678").unwrap(), - Some("somevalue".as_bytes().to_vec()) - ); - } - - #[test] - fn test_paritydb_migrate_1_to_2() { - use parity_db::Db; - - let db_dir = tempfile::tempdir().unwrap(); - let path = db_dir.path(); - - // We need to properly set db version for upgrade to work. 
- fs::write(version_file_path(path), "1").expect("Failed to write DB version"); - - { - let db = Db::open_or_create(&paritydb_version_1_config(&path)).unwrap(); - - // Write some dummy data - db.commit(vec![( - COL_DISPUTE_COORDINATOR_DATA as u8, - b"1234".to_vec(), - Some(b"somevalue".to_vec()), - )]) - .unwrap(); - - assert_eq!(db.num_columns(), columns::v1::NUM_COLUMNS as u8); - } - - try_upgrade_db(&path, DatabaseKind::ParityDB).unwrap(); - - let db = Db::open(&paritydb_version_2_config(&path)).unwrap(); - - assert_eq!(db.num_columns(), columns::v2::NUM_COLUMNS as u8); - - assert_eq!( - db.get(COL_DISPUTE_COORDINATOR_DATA as u8, b"1234").unwrap(), - Some("somevalue".as_bytes().to_vec()) - ); - - // Test we can write the new column. - db.commit(vec![( - COL_SESSION_WINDOW_DATA as u8, - b"1337".to_vec(), - Some(b"0xdeadb00b".to_vec()), - )]) - .unwrap(); - - // Read back data from new column. - assert_eq!( - db.get(COL_SESSION_WINDOW_DATA as u8, b"1337").unwrap(), - Some("0xdeadb00b".as_bytes().to_vec()) - ); - } - #[test] fn test_rocksdb_migrate_1_to_2() { use kvdb::{DBKey, DBOp}; @@ -467,38 +380,6 @@ mod tests { ); } - #[test] - fn test_paritydb_migrate_2_to_3() { - use parity_db::Db; - - let db_dir = tempfile::tempdir().unwrap(); - let path = db_dir.path(); - let test_key = b"1337"; - - // We need to properly set db version for upgrade to work. 
- fs::write(version_file_path(path), "2").expect("Failed to write DB version"); - - { - let db = Db::open_or_create(&paritydb_version_2_config(&path)).unwrap(); - - // Write some dummy data - db.commit(vec![( - COL_SESSION_WINDOW_DATA as u8, - test_key.to_vec(), - Some(b"0xdeadb00b".to_vec()), - )]) - .unwrap(); - - assert_eq!(db.num_columns(), columns::v2::NUM_COLUMNS as u8); - } - - try_upgrade_db(&path, DatabaseKind::ParityDB).unwrap(); - - let db = Db::open(&paritydb_version_3_config(&path)).unwrap(); - - assert_eq!(db.num_columns(), columns::v3::NUM_COLUMNS as u8); - } - #[test] fn test_rocksdb_migrate_2_to_3() { use kvdb_rocksdb::{Database, DatabaseConfig}; From 6f9fe2682d8dd6840ee4a079bac4b6bb3ea33a8c Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Mon, 14 Aug 2023 16:29:29 +0200 Subject: [PATCH 30/35] Use same `fmt` and `clippy` configs as in Substrate (#7611) * Use same rustfmt.toml as Substrate Signed-off-by: Oliver Tale-Yazdi * format format file Signed-off-by: Oliver Tale-Yazdi * Format with new config Signed-off-by: Oliver Tale-Yazdi * Add Substrate Clippy config Signed-off-by: Oliver Tale-Yazdi * Print Clippy version in CI Otherwise its difficult to reproduce locally. 
Signed-off-by: Oliver Tale-Yazdi * Make fmt happy Signed-off-by: Oliver Tale-Yazdi * Update node/core/pvf/src/error.rs Co-authored-by: Tsvetomir Dimitrov * Update node/core/pvf/src/error.rs Co-authored-by: Tsvetomir Dimitrov --------- Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Tsvetomir Dimitrov --- .cargo/config.toml | 1 + cli/src/cli.rs | 4 +- cli/src/command.rs | 4 +- core-primitives/src/lib.rs | 12 +- node/collation-generation/src/lib.rs | 22 ++-- node/collation-generation/src/tests.rs | 10 +- .../approval-voting/src/approval_checking.rs | 8 +- node/core/approval-voting/src/criteria.rs | 15 +-- node/core/approval-voting/src/import.rs | 28 ++--- node/core/approval-voting/src/lib.rs | 35 +++--- node/core/approval-voting/src/ops.rs | 7 +- node/core/av-store/src/lib.rs | 35 +++--- node/core/backing/src/lib.rs | 14 ++- node/core/backing/src/metrics.rs | 3 +- node/core/backing/src/tests.rs | 3 +- node/core/bitfield-signing/src/lib.rs | 4 +- node/core/candidate-validation/src/lib.rs | 33 +++--- node/core/chain-selection/src/lib.rs | 6 +- node/core/dispute-coordinator/src/db/v1.rs | 7 +- node/core/dispute-coordinator/src/import.rs | 8 +- .../dispute-coordinator/src/initialized.rs | 12 +- node/core/dispute-coordinator/src/lib.rs | 19 +-- .../src/participation/queues/mod.rs | 22 ++-- .../src/participation/queues/tests.rs | 4 +- .../src/participation/tests.rs | 3 +- .../src/scraping/candidates.rs | 6 +- .../dispute-coordinator/src/scraping/mod.rs | 15 +-- .../dispute-coordinator/src/scraping/tests.rs | 19 +-- node/core/dispute-coordinator/src/tests.rs | 62 ++++++---- node/core/parachains-inherent/src/lib.rs | 11 +- node/core/provisioner/src/disputes/mod.rs | 6 +- .../src/disputes/prioritized_selection/mod.rs | 45 ++++---- .../disputes/prioritized_selection/tests.rs | 19 +-- node/core/provisioner/src/error.rs | 3 +- node/core/provisioner/src/lib.rs | 33 +++--- node/core/provisioner/src/metrics.rs | 7 +- node/core/provisioner/src/tests.rs | 3 +- 
node/core/pvf-checker/src/lib.rs | 4 +- node/core/pvf-checker/src/tests.rs | 4 +- node/core/pvf/common/src/error.rs | 23 ++-- node/core/pvf/common/src/executor_intf.rs | 20 ++-- node/core/pvf/common/src/worker/mod.rs | 6 +- node/core/pvf/execute-worker/src/lib.rs | 3 +- node/core/pvf/prepare-worker/src/lib.rs | 17 +-- .../pvf/prepare-worker/src/memory_stats.rs | 4 +- node/core/pvf/src/artifacts.rs | 3 +- node/core/pvf/src/error.rs | 29 ++--- node/core/pvf/src/execute/queue.rs | 3 +- node/core/pvf/src/execute/worker_intf.rs | 8 +- node/core/pvf/src/host.rs | 19 +-- node/core/pvf/src/lib.rs | 30 ++--- node/core/pvf/src/metrics.rs | 3 +- node/core/pvf/src/prepare/pool.rs | 16 +-- node/core/pvf/src/prepare/queue.rs | 5 +- node/core/pvf/src/prepare/worker_intf.rs | 4 +- node/core/pvf/src/worker_intf.rs | 26 +++-- node/core/runtime-api/src/lib.rs | 11 +- node/core/runtime-api/src/tests.rs | 3 +- node/gum/src/lib.rs | 15 ++- node/jaeger/src/lib.rs | 3 +- node/jaeger/src/spans.rs | 4 +- node/malus/src/variants/common.rs | 22 ++-- .../src/variants/dispute_valid_candidates.rs | 11 +- .../src/variants/suggest_garbage_candidate.rs | 11 +- node/metrics/src/lib.rs | 3 +- node/network/approval-distribution/src/lib.rs | 36 +++--- .../src/requester/fetch_task/mod.rs | 3 +- .../src/requester/mod.rs | 17 ++- .../src/futures_undead.rs | 1 - node/network/availability-recovery/src/lib.rs | 49 ++++---- .../availability-recovery/src/tests.rs | 3 +- node/network/bridge/src/rx/mod.rs | 6 +- node/network/bridge/src/rx/tests.rs | 5 +- node/network/bridge/src/tx/mod.rs | 3 +- .../network/bridge/src/validator_discovery.rs | 7 +- .../src/collator_side/mod.rs | 15 +-- .../src/collator_side/tests.rs | 10 +- .../src/collator_side/validators_buffer.rs | 6 +- .../src/validator_side/tests.rs | 3 +- node/network/dispute-distribution/src/lib.rs | 8 +- .../src/receiver/batches/batch.rs | 4 +- .../src/receiver/batches/waiting_queue.rs | 4 +- .../dispute-distribution/src/receiver/mod.rs | 10 +- 
.../src/sender/send_task.rs | 11 +- node/network/gossip-support/src/lib.rs | 3 +- node/network/protocol/src/grid_topology.rs | 19 +-- node/network/protocol/src/lib.rs | 6 +- node/network/protocol/src/peer_set.rs | 3 +- .../src/request_response/incoming/mod.rs | 4 +- .../protocol/src/request_response/mod.rs | 6 +- .../network/statement-distribution/src/lib.rs | 49 ++++---- .../statement-distribution/src/tests.rs | 8 +- node/overseer/src/lib.rs | 34 +++--- node/primitives/src/disputes/message.rs | 4 +- node/primitives/src/disputes/status.rs | 11 +- node/primitives/src/lib.rs | 26 +++-- node/service/src/chain_spec.rs | 11 +- node/service/src/fake_runtime_api.rs | 3 +- node/service/src/lib.rs | 14 ++- node/service/src/relay_chain_selection.rs | 11 +- node/service/src/tests.rs | 26 ++--- node/subsystem-test-helpers/src/lib.rs | 3 +- node/subsystem-types/src/lib.rs | 4 +- node/subsystem-types/src/messages.rs | 109 ++++++++++-------- node/subsystem-types/src/runtime_client.rs | 7 +- node/subsystem-util/src/lib.rs | 9 +- node/subsystem-util/src/nesting_sender.rs | 21 ++-- node/subsystem-util/src/reputation.rs | 3 +- node/test/client/src/block_builder.rs | 19 +-- node/test/service/src/lib.rs | 12 +- parachain/src/primitives.rs | 17 +-- .../test-parachains/adder/collator/src/lib.rs | 8 +- .../adder/collator/tests/integration.rs | 3 +- .../undying/collator/src/lib.rs | 8 +- .../undying/collator/tests/integration.rs | 3 +- primitives/src/runtime_api.rs | 21 ++-- primitives/src/v5/metrics.rs | 10 +- primitives/src/v5/mod.rs | 79 +++++++------ primitives/test-helpers/src/lib.rs | 3 +- runtime/common/slot_range_helper/src/lib.rs | 8 +- runtime/common/src/assigned_slots.rs | 9 +- runtime/common/src/auctions.rs | 42 +++---- runtime/common/src/claims.rs | 21 ++-- runtime/common/src/crowdloan/migration.rs | 4 +- runtime/common/src/crowdloan/mod.rs | 67 ++++++----- runtime/common/src/integration_tests.rs | 9 +- runtime/common/src/paras_registrar.rs | 44 ++++--- 
runtime/common/src/paras_sudo_wrapper.rs | 8 +- runtime/common/src/purchase.rs | 23 ++-- runtime/common/src/slots/mod.rs | 33 +++--- runtime/common/src/traits.rs | 25 ++-- runtime/kusama/src/xcm_config.rs | 28 ++--- runtime/parachains/src/builder.rs | 16 +-- runtime/parachains/src/configuration.rs | 48 ++++---- .../src/configuration/migration/v7.rs | 21 ++-- runtime/parachains/src/disputes.rs | 7 +- runtime/parachains/src/disputes/migration.rs | 6 +- runtime/parachains/src/disputes/tests.rs | 6 +- runtime/parachains/src/hrmp.rs | 19 +-- runtime/parachains/src/inclusion/mod.rs | 27 ++--- runtime/parachains/src/initializer.rs | 12 +- runtime/parachains/src/origin.rs | 1 - runtime/parachains/src/paras/mod.rs | 91 ++++++++------- runtime/parachains/src/paras/tests.rs | 8 +- runtime/parachains/src/paras_inherent/mod.rs | 42 ++++--- .../parachains/src/paras_inherent/tests.rs | 51 +++++--- runtime/parachains/src/runtime_api_impl/v5.rs | 3 +- runtime/parachains/src/scheduler.rs | 94 ++++++++------- runtime/parachains/src/scheduler/tests.rs | 14 ++- runtime/parachains/src/shared.rs | 4 +- runtime/parachains/src/util.rs | 4 +- runtime/polkadot/src/governance/old.rs | 3 +- runtime/polkadot/src/xcm_config.rs | 23 ++-- runtime/rococo/src/xcm_config.rs | 12 +- runtime/test-runtime/src/lib.rs | 4 +- runtime/test-runtime/src/xcm_config.rs | 4 +- runtime/westend/src/lib.rs | 4 +- runtime/westend/src/xcm_config.rs | 4 +- rustfmt.toml | 12 +- scripts/ci/gitlab/pipeline/test.yml | 1 + statement-table/src/generic.rs | 17 +-- tests/common.rs | 3 +- utils/staking-miner/src/opts.rs | 26 +++-- utils/staking-miner/src/rpc.rs | 3 +- xcm/pallet-xcm-benchmarks/src/generic/mod.rs | 9 +- xcm/pallet-xcm/src/lib.rs | 108 +++++++++-------- xcm/src/double_encoded.rs | 8 +- xcm/src/lib.rs | 12 +- xcm/src/v2/junction.rs | 16 +-- xcm/src/v2/mod.rs | 78 ++++++------- xcm/src/v2/multiasset.rs | 76 ++++++------ xcm/src/v2/multilocation.rs | 29 ++--- xcm/src/v2/traits.rs | 31 ++--- xcm/src/v3/junction.rs | 
39 ++++--- xcm/src/v3/junctions.rs | 18 +-- xcm/src/v3/mod.rs | 69 ++++++----- xcm/src/v3/multiasset.rs | 57 +++++---- xcm/src/v3/multilocation.rs | 14 ++- xcm/src/v3/traits.rs | 9 +- xcm/xcm-builder/src/asset_conversion.rs | 6 +- xcm/xcm-builder/src/currency_adapter.rs | 4 +- xcm/xcm-builder/src/fungibles_adapter.rs | 4 +- xcm/xcm-builder/src/location_conversion.rs | 7 +- xcm/xcm-builder/src/origin_aliases.rs | 3 +- xcm/xcm-builder/src/origin_conversion.rs | 18 +-- xcm/xcm-builder/src/tests/assets.rs | 3 +- .../tests/bridging/paid_remote_relay_relay.rs | 6 +- xcm/xcm-builder/src/tests/mock.rs | 4 +- xcm/xcm-builder/src/tests/querying.rs | 3 +- xcm/xcm-builder/src/universal_exports.rs | 8 +- xcm/xcm-builder/src/weight.rs | 5 +- xcm/xcm-builder/tests/scenarios.rs | 4 +- xcm/xcm-executor/src/assets.rs | 47 ++++---- xcm/xcm-executor/src/lib.rs | 22 ++-- xcm/xcm-executor/src/traits/asset_exchange.rs | 4 +- xcm/xcm-executor/src/traits/asset_lock.rs | 4 +- xcm/xcm-executor/src/traits/conversion.rs | 6 +- .../src/traits/filter_asset_location.rs | 3 +- xcm/xcm-executor/src/traits/on_response.rs | 12 +- xcm/xcm-executor/src/traits/should_execute.rs | 4 +- xcm/xcm-executor/src/traits/transact_asset.rs | 53 +++++---- xcm/xcm-executor/src/traits/weight.rs | 8 +- xcm/xcm-simulator/src/lib.rs | 26 ++--- 203 files changed, 1880 insertions(+), 1504 deletions(-) diff --git a/.cargo/config.toml b/.cargo/config.toml index 66b28b3485d8..4796a2c26965 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -29,4 +29,5 @@ rustflags = [ "-Aclippy::needless_option_as_deref", # false positives "-Aclippy::derivable_impls", # false positives "-Aclippy::stable_sort_primitive", # prefer stable sort + "-Aclippy::extra-unused-type-parameters", # stylistic ] diff --git a/cli/src/cli.rs b/cli/src/cli.rs index e78213cf11c8..c13340d91a04 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -130,8 +130,8 @@ pub struct RunCmd { pub overseer_channel_capacity_override: Option, /// Path to the 
directory where auxiliary worker binaries reside. If not specified, the main - /// binary's directory is searched first, then `/usr/lib/polkadot` is searched. TESTING ONLY: if - /// the path points to an executable rather then directory, that executable is used both as + /// binary's directory is searched first, then `/usr/lib/polkadot` is searched. TESTING ONLY: + /// if the path points to an executable rather then directory, that executable is used both as /// preparation and execution worker. #[arg(long, value_name = "PATH")] pub workers_path: Option, diff --git a/cli/src/command.rs b/cli/src/command.rs index c8e8673c6d70..c75f96ee2ebf 100644 --- a/cli/src/command.rs +++ b/cli/src/command.rs @@ -148,8 +148,8 @@ impl SubstrateCli for Cli { let chain_spec = Box::new(service::PolkadotChainSpec::from_json_file(path.clone())?) as Box; - // When `force_*` is given or the file name starts with the name of one of the known chains, - // we use the chain spec for the specific chain. + // When `force_*` is given or the file name starts with the name of one of the known + // chains, we use the chain spec for the specific chain. if self.run.force_rococo || chain_spec.is_rococo() || chain_spec.is_wococo() || diff --git a/core-primitives/src/lib.rs b/core-primitives/src/lib.rs index 5e06966ecfee..aa01cf8dfc45 100644 --- a/core-primitives/src/lib.rs +++ b/core-primitives/src/lib.rs @@ -91,10 +91,10 @@ impl sp_std::fmt::Debug for CandidateHash { pub type Nonce = u32; /// The balance of an account. -/// 128-bits (or 38 significant decimal figures) will allow for 10 m currency (`10^7`) at a resolution -/// to all for one second's worth of an annualised 50% reward be paid to a unit holder (`10^11` unit -/// denomination), or `10^18` total atomic units, to grow at 50%/year for 51 years (`10^9` multiplier) -/// for an eventual total of `10^27` units (27 significant decimal figures). 
+/// 128-bits (or 38 significant decimal figures) will allow for 10 m currency (`10^7`) at a +/// resolution to all for one second's worth of an annualised 50% reward be paid to a unit holder +/// (`10^11` unit denomination), or `10^18` total atomic units, to grow at 50%/year for 51 years +/// (`10^9` multiplier) for an eventual total of `10^27` units (27 significant decimal figures). /// We round denomination to `10^12` (12 SDF), and leave the other redundancy at the upper end so /// that 32 bits may be multiplied with a balance in 128 bits without worrying about overflow. pub type Balance = u128; @@ -121,8 +121,8 @@ pub type Remark = [u8; 32]; /// The size of the message is limited by the `config.max_downward_message_size` parameter. pub type DownwardMessage = sp_std::vec::Vec; -/// A wrapped version of `DownwardMessage`. The difference is that it has attached the block number when -/// the message was sent. +/// A wrapped version of `DownwardMessage`. The difference is that it has attached the block number +/// when the message was sent. #[derive(Encode, Decode, Clone, sp_runtime::RuntimeDebug, PartialEq, TypeInfo)] pub struct InboundDownwardMessage { /// The block number at which these messages were put into the downward message queue. diff --git a/node/collation-generation/src/lib.rs b/node/collation-generation/src/lib.rs index 02a0e8df8f61..8726ebf44c71 100644 --- a/node/collation-generation/src/lib.rs +++ b/node/collation-generation/src/lib.rs @@ -22,9 +22,11 @@ //! //! * If there is no collation generation config, ignore. //! * Otherwise, for each `activated` head in the update: -//! * Determine if the para is scheduled on any core by fetching the `availability_cores` Runtime API. +//! * Determine if the para is scheduled on any core by fetching the `availability_cores` Runtime +//! API. //! * Use the Runtime API subsystem to fetch the full validation data. -//! 
* Invoke the `collator`, and use its outputs to produce a [`CandidateReceipt`], signed with the configuration's `key`. +//! * Invoke the `collator`, and use its outputs to produce a [`CandidateReceipt`], signed with +//! the configuration's `key`. //! * Dispatch a [`CollatorProtocolMessage::DistributeCollation`]`(receipt, pov)`. #![deny(missing_docs)] @@ -77,8 +79,8 @@ impl CollationGenerationSubsystem { /// Conceptually, this is very simple: it just loops forever. /// /// - On incoming overseer messages, it starts or stops jobs as appropriate. - /// - On other incoming messages, if they can be converted into `Job::ToJob` and - /// include a hash, then they're forwarded to the appropriate individual job. + /// - On other incoming messages, if they can be converted into `Job::ToJob` and include a hash, + /// then they're forwarded to the appropriate individual job. /// - On outgoing messages from the jobs, it forwards them to the overseer. /// /// If `err_tx` is not `None`, errors are forwarded onto that channel as they occur. @@ -109,9 +111,10 @@ impl CollationGenerationSubsystem { } // handle an incoming message. return true if we should break afterwards. - // note: this doesn't strictly need to be a separate function; it's more an administrative function - // so that we don't clutter the run loop. It could in principle be inlined directly into there. - // it should hopefully therefore be ok that it's an async function mutably borrowing self. + // note: this doesn't strictly need to be a separate function; it's more an administrative + // function so that we don't clutter the run loop. It could in principle be inlined directly + // into there. it should hopefully therefore be ok that it's an async function mutably borrowing + // self. 
async fn handle_incoming( &mut self, incoming: SubsystemResult::Message>>, @@ -319,8 +322,9 @@ async fn handle_new_activations( // As long as `POV_BOMB_LIMIT` is at least `max_pov_size`, this ensures // that honest collators never produce a PoV which is uncompressed. // - // As such, honest collators never produce an uncompressed PoV which starts with - // a compression magic number, which would lead validators to reject the collation. + // As such, honest collators never produce an uncompressed PoV which starts + // with a compression magic number, which would lead validators to reject + // the collation. if encoded_size > validation_data.max_pov_size as usize { gum::debug!( target: LOG_TARGET, diff --git a/node/collation-generation/src/tests.rs b/node/collation-generation/src/tests.rs index b2534bcf36c1..1c98e1450941 100644 --- a/node/collation-generation/src/tests.rs +++ b/node/collation-generation/src/tests.rs @@ -203,9 +203,9 @@ mod handle_new_activations { .into_inner(); // the only activated hash should be from the 4 hash: - // each activated hash generates two scheduled cores: one with its value * 4, one with its value * 5 - // given that the test configuration has a `para_id` of 16, there's only one way to get that value: with the 4 - // hash. + // each activated hash generates two scheduled cores: one with its value * 4, one with its + // value * 5 given that the test configuration has a `para_id` of 16, there's only one way + // to get that value: with the 4 hash. assert_eq!(requested_validation_data, vec![[4; 32].into()]); } @@ -301,8 +301,8 @@ mod handle_new_activations { .into_inner(); // we expect a single message to be sent, containing a candidate receipt. 
- // we don't care too much about the `commitments_hash` right now, but let's ensure that we've calculated the - // correct descriptor + // we don't care too much about the `commitments_hash` right now, but let's ensure that + // we've calculated the correct descriptor let expect_pov_hash = test_collation_compressed().proof_of_validity.into_compressed().hash(); let expect_validation_data_hash = test_validation_data().hash(); diff --git a/node/core/approval-voting/src/approval_checking.rs b/node/core/approval-voting/src/approval_checking.rs index bfecdba73f88..f345b57029b5 100644 --- a/node/core/approval-voting/src/approval_checking.rs +++ b/node/core/approval-voting/src/approval_checking.rs @@ -42,8 +42,8 @@ pub enum RequiredTranches { /// assignments that are before the local time. maximum_broadcast: DelayTranche, /// The clock drift, in ticks, to apply to the local clock when determining whether - /// to broadcast an assignment or when to schedule a wakeup. The local clock should be treated - /// as though it is `clock_drift` ticks earlier. + /// to broadcast an assignment or when to schedule a wakeup. The local clock should be + /// treated as though it is `clock_drift` ticks earlier. clock_drift: Tick, }, /// An exact number of required tranches and a number of no-shows. This indicates that @@ -55,8 +55,8 @@ pub enum RequiredTranches { /// The amount of missing votes that should be tolerated. tolerated_missing: usize, /// When the next no-show would be, if any. This is used to schedule the next wakeup in the - /// event that there are some assignments that don't have corresponding approval votes. If this - /// is `None`, all assignments have approvals. + /// event that there are some assignments that don't have corresponding approval votes. If + /// this is `None`, all assignments have approvals. next_no_show: Option, /// The last tick at which a needed assignment was received. 
last_assignment_tick: Option, diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index 40a24e2dd937..0e1d18198c21 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -218,13 +218,14 @@ impl AssignmentCriteria for RealAssignmentCriteria { } /// Compute the assignments for a given block. Returns a map containing all assignments to cores in -/// the block. If more than one assignment targets the given core, only the earliest assignment is kept. +/// the block. If more than one assignment targets the given core, only the earliest assignment is +/// kept. /// -/// The `leaving_cores` parameter indicates all cores within the block where a candidate was included, -/// as well as the group index backing those. +/// The `leaving_cores` parameter indicates all cores within the block where a candidate was +/// included, as well as the group index backing those. /// -/// The current description of the protocol assigns every validator to check every core. But at different times. -/// The idea is that most assignments are never triggered and fall by the wayside. +/// The current description of the protocol assigns every validator to check every core. But at +/// different times. The idea is that most assignments are never triggered and fall by the wayside. /// /// This will not assign to anything the local validator was part of the backing group for. pub(crate) fn compute_assignments( @@ -463,8 +464,8 @@ pub(crate) enum InvalidAssignmentReason { /// * Sample is out of bounds /// * Validator is present in backing group. /// -/// This function does not check whether the core is actually a valid assignment or not. That should be done -/// outside the scope of this function. +/// This function does not check whether the core is actually a valid assignment or not. That should +/// be done outside the scope of this function. 
pub(crate) fn check_assignment_cert( claimed_core_index: CoreIndex, validator_index: ValidatorIndex, diff --git a/node/core/approval-voting/src/import.rs b/node/core/approval-voting/src/import.rs index e33caed49c5f..c504ba71b3c2 100644 --- a/node/core/approval-voting/src/import.rs +++ b/node/core/approval-voting/src/import.rs @@ -104,7 +104,8 @@ enum ImportedBlockInfoError { VrfInfoUnavailable, } -/// Computes information about the imported block. Returns an error if the info couldn't be extracted. +/// Computes information about the imported block. Returns an error if the info couldn't be +/// extracted. #[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] async fn imported_block_info( ctx: &mut Context, @@ -181,20 +182,21 @@ async fn imported_block_info( // It's not obvious whether to use the hash or the parent hash for this, intuitively. We // want to use the block hash itself, and here's why: // - // First off, 'epoch' in BABE means 'session' in other places. 'epoch' is the terminology from - // the paper, which we fulfill using 'session's, which are a Substrate consensus concept. + // First off, 'epoch' in BABE means 'session' in other places. 'epoch' is the terminology + // from the paper, which we fulfill using 'session's, which are a Substrate consensus + // concept. // - // In BABE, the on-chain and off-chain view of the current epoch can differ at epoch boundaries - // because epochs change precisely at a slot. When a block triggers a new epoch, the state of - // its parent will still have the old epoch. Conversely, we have the invariant that every - // block in BABE has the epoch _it was authored in_ within its post-state. So we use the - // block, and not its parent. + // In BABE, the on-chain and off-chain view of the current epoch can differ at epoch + // boundaries because epochs change precisely at a slot. When a block triggers a new epoch, + // the state of its parent will still have the old epoch. 
Conversely, we have the invariant + // that every block in BABE has the epoch _it was authored in_ within its post-state. So we + // use the block, and not its parent. // - // It's worth nothing that Polkadot session changes, at least for the purposes of parachains, - // would function the same way, except for the fact that they're always delayed by one block. - // This gives us the opposite invariant for sessions - the parent block's post-state gives - // us the canonical information about the session index for any of its children, regardless - // of which slot number they might be produced at. + // It's worth noting that Polkadot session changes, at least for the purposes of + // parachains, would function the same way, except for the fact that they're always delayed + // by one block. This gives us the opposite invariant for sessions - the parent block's + // post-state gives us the canonical information about the session index for any of its + // children, regardless of which slot number they might be produced at. ctx.send_message(RuntimeApiMessage::Request( block_hash, RuntimeApiRequest::CurrentBabeEpoch(s_tx), diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index 05b92f459529..7e29e64c400a 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -1232,8 +1232,8 @@ async fn handle_from_overseer( ); // Our first wakeup will just be the tranche of our assignment, - // if any. This will likely be superseded by incoming assignments - // and approvals which trigger rescheduling. + // if any. This will likely be superseded by incoming + // assignments and approvals which trigger rescheduling.
actions.push(Action::ScheduleWakeup { block_hash: block_batch.block_hash, block_number: block_batch.block_number, @@ -1256,12 +1256,14 @@ async fn handle_from_overseer( crate::ops::canonicalize(db, block_number, block_hash) .map_err(|e| SubsystemError::with_origin("db", e))?; - // `prune_finalized_wakeups` prunes all finalized block hashes. We prune spans accordingly. + // `prune_finalized_wakeups` prunes all finalized block hashes. We prune spans + // accordingly. wakeups.prune_finalized_wakeups(block_number, &mut state.spans); - // // `prune_finalized_wakeups` prunes all finalized block hashes. We prune spans accordingly. - // let hash_set = wakeups.block_numbers.values().flatten().collect::>(); - // state.spans.retain(|hash, _| hash_set.contains(hash)); + // // `prune_finalized_wakeups` prunes all finalized block hashes. We prune spans + // accordingly. let hash_set = + // wakeups.block_numbers.values().flatten().collect::>(); state.spans. + // retain(|hash, _| hash_set.contains(hash)); Vec::new() }, @@ -1403,8 +1405,8 @@ async fn get_approval_signatures_for_candidate( tx_distribution, )); - // Because of the unbounded sending and the nature of the call (just fetching data from state), - // this should not block long: + // Because of the unbounded sending and the nature of the call (just fetching data from + // state), this should not block long: match rx_distribution.timeout(WAIT_FOR_SIGS_TIMEOUT).await { None => { gum::warn!( @@ -2117,9 +2119,10 @@ impl ApprovalStateTransition { } } -// Advance the approval state, either by importing an approval vote which is already checked to be valid and corresponding to an assigned -// validator on the candidate and block, or by noting that there are no further wakeups or tranches needed. This updates the block entry and candidate entry as -// necessary and schedules any further wakeups. 
+// Advance the approval state, either by importing an approval vote which is already checked to be +// valid and corresponding to an assigned validator on the candidate and block, or by noting that +// there are no further wakeups or tranches needed. This updates the block entry and candidate entry +// as necessary and schedules any further wakeups. async fn advance_approval_state( sender: &mut Sender, state: &State, @@ -2251,7 +2254,8 @@ where // 1. This is not a local approval, as we don't store anything new in the approval entry. // 2. The candidate is not newly approved, as we haven't altered the approval entry's // approved flag with `mark_approved` above. - // 3. The approver, if any, had already approved the candidate, as we haven't altered the bitfield. + // 3. The approver, if any, had already approved the candidate, as we haven't altered the + // bitfield. if transition.is_local_approval() || newly_approved || !already_approved_by.unwrap_or(true) { // In all other cases, we need to write the candidate entry. @@ -2279,7 +2283,8 @@ fn should_trigger_assignment( &approval_entry, RequiredTranches::All, ) - .is_approved(Tick::max_value()), // when all are required, we are just waiting for the first 1/3+ + // when all are required, we are just waiting for the first 1/3+ + .is_approved(Tick::max_value()), RequiredTranches::Pending { maximum_broadcast, clock_drift, .. } => { let drifted_tranche_now = tranche_now.saturating_sub(clock_drift as DelayTranche); @@ -2615,8 +2620,8 @@ async fn launch_approval( match val_rx.await { Err(_) => return ApprovalState::failed(validator_index, candidate_hash), Ok(Ok(ValidationResult::Valid(_, _))) => { - // Validation checked out. Issue an approval command. If the underlying service is unreachable, - // then there isn't anything we can do. + // Validation checked out. Issue an approval command. If the underlying service is + // unreachable, then there isn't anything we can do. 
gum::trace!(target: LOG_TARGET, ?candidate_hash, ?para_id, "Candidate Valid"); diff --git a/node/core/approval-voting/src/ops.rs b/node/core/approval-voting/src/ops.rs index 4d6dc5e7ad66..6f57b2f80e8a 100644 --- a/node/core/approval-voting/src/ops.rs +++ b/node/core/approval-voting/src/ops.rs @@ -161,7 +161,8 @@ pub fn canonicalize( } } - // Update all blocks-at-height keys, deleting all those which now have empty `block_assignments`. + // Update all blocks-at-height keys, deleting all those which now have empty + // `block_assignments`. for (h, at) in visited_heights.into_iter() { if at.is_empty() { overlay_db.delete_blocks_at_height(h); @@ -170,8 +171,8 @@ pub fn canonicalize( } } - // due to the fork pruning, this range actually might go too far above where our actual highest block is, - // if a relatively short fork is canonicalized. + // due to the fork pruning, this range actually might go too far above where our actual highest + // block is, if a relatively short fork is canonicalized. // TODO https://github.com/paritytech/polkadot/issues/3389 let new_range = StoredBlockRange(canon_number + 1, std::cmp::max(range.1, canon_number + 2)); diff --git a/node/core/av-store/src/lib.rs b/node/core/av-store/src/lib.rs index 675d41b79c06..ef7dcecac075 100644 --- a/node/core/av-store/src/lib.rs +++ b/node/core/av-store/src/lib.rs @@ -67,8 +67,8 @@ const META_PREFIX: &[u8; 4] = b"meta"; const UNFINALIZED_PREFIX: &[u8; 11] = b"unfinalized"; const PRUNE_BY_TIME_PREFIX: &[u8; 13] = b"prune_by_time"; -// We have some keys we want to map to empty values because existence of the key is enough. We use this because -// rocksdb doesn't support empty values. +// We have some keys we want to map to empty values because existence of the key is enough. We use +// this because rocksdb doesn't support empty values. const TOMBSTONE_VALUE: &[u8] = b" "; /// Unavailable blocks are kept for 1 hour. 
@@ -139,10 +139,11 @@ enum State { /// Candidate data was first observed at the given time but is not available in any block. #[codec(index = 0)] Unavailable(BETimestamp), - /// The candidate was first observed at the given time and was included in the given list of unfinalized blocks, which may be - /// empty. The timestamp here is not used for pruning. Either one of these blocks will be finalized or the state will regress to - /// `State::Unavailable`, in which case the same timestamp will be reused. Blocks are sorted ascending first by block number and - /// then hash. + /// The candidate was first observed at the given time and was included in the given list of + /// unfinalized blocks, which may be empty. The timestamp here is not used for pruning. Either + /// one of these blocks will be finalized or the state will regress to `State::Unavailable`, in + /// which case the same timestamp will be reused. Blocks are sorted ascending first by block + /// number and then hash. #[codec(index = 1)] Unfinalized(BETimestamp, Vec<(BEBlockNumber, Hash)>), /// Candidate data has appeared in a finalized block and did so at the given time. @@ -820,8 +821,8 @@ fn note_block_included( match load_meta(db, config, &candidate_hash)? { None => { - // This is alarming. We've observed a block being included without ever seeing it backed. - // Warn and ignore. + // This is alarming. We've observed a block being included without ever seeing it + // backed. Warn and ignore. gum::warn!( target: LOG_TARGET, ?candidate_hash, @@ -894,9 +895,9 @@ async fn process_block_finalized( let mut db_transaction = DBTransaction::new(); let (start_prefix, end_prefix) = finalized_block_range(finalized_number); - // We have to do some juggling here of the `iter` to make sure it doesn't cross the `.await` boundary - // as it is not `Send`. That is why we create the iterator once within this loop, drop it, - // do an asynchronous request, and then instantiate the exact same iterator again. 
+ // We have to do some juggling here of the `iter` to make sure it doesn't cross the `.await` + // boundary as it is not `Send`. That is why we create the iterator once within this loop, + // drop it, do an asynchronous request, and then instantiate the exact same iterator again. let batch_num = { let mut iter = subsystem .db @@ -961,8 +962,9 @@ async fn process_block_finalized( update_blocks_at_finalized_height(&subsystem, &mut db_transaction, batch, batch_num, now)?; - // We need to write at the end of the loop so the prefix iterator doesn't pick up the same values again - // in the next iteration. Another unfortunate effect of having to re-initialize the iterator. + // We need to write at the end of the loop so the prefix iterator doesn't pick up the same + // values again in the next iteration. Another unfortunate effect of having to re-initialize + // the iterator. subsystem.db.write(db_transaction)?; } @@ -1215,7 +1217,8 @@ fn process_message( // We do not bubble up internal errors to caller subsystems, instead the // tx channel is dropped and that error is caught by the caller subsystem. // - // We bubble up the specific error here so `av-store` logs still tell what happend. + // We bubble up the specific error here so `av-store` logs still tell what + // happened. return Err(e.into()) }, } @@ -1298,8 +1301,8 @@ fn store_available_data( .with_candidate(candidate_hash) .with_pov(&available_data.pov); - // Important note: This check below is critical for consensus and the `backing` subsystem relies on it to - // ensure candidate validity. + // Important note: This check below is critical for consensus and the `backing` subsystem relies + // on it to ensure candidate validity.
let chunks = erasure::obtain_chunks_v1(n_validators, &available_data)?; let branches = erasure::branches(chunks.as_ref()); diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index dc0863cfa0b3..0abfbfad7657 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -422,7 +422,8 @@ struct CandidateBackingJob { awaiting_validation: HashSet, /// Data needed for retrying in case of `ValidatedCandidateCommand::AttestNoPoV`. fallbacks: HashMap)>, - /// `Some(h)` if this job has already issued `Seconded` statement for some candidate with `h` hash. + /// `Some(h)` if this job has already issued `Seconded` statement for some candidate with `h` + /// hash. seconded: Option, /// The candidates that are includable, by hash. Each entry here indicates /// that we've sent the provisioner the backed candidate. @@ -562,9 +563,10 @@ async fn store_available_data( expected_erasure_root: Hash, ) -> Result<(), Error> { let (tx, rx) = oneshot::channel(); - // Important: the `av-store` subsystem will check if the erasure root of the `available_data` matches `expected_erasure_root` - // which was provided by the collator in the `CandidateReceipt`. This check is consensus critical and the `backing` subsystem - // relies on it for ensuring candidate validity. + // Important: the `av-store` subsystem will check if the erasure root of the `available_data` + // matches `expected_erasure_root` which was provided by the collator in the `CandidateReceipt`. + // This check is consensus critical and the `backing` subsystem relies on it for ensuring + // candidate validity. sender .send_message(AvailabilityStoreMessage::StoreAvailableData { candidate_hash, @@ -582,8 +584,8 @@ async fn store_available_data( // Make a `PoV` available. // -// This calls the AV store to write the available data to storage. The AV store also checks the erasure root matches -// the `expected_erasure_root`. +// This calls the AV store to write the available data to storage. 
The AV store also checks the +// erasure root matches the `expected_erasure_root`. // This returns `Err()` on erasure root mismatch or due to any AV store subsystem error. // // Otherwise, it returns either `Ok(())` diff --git a/node/core/backing/src/metrics.rs b/node/core/backing/src/metrics.rs index 8468ea005404..77f0e7f9d92a 100644 --- a/node/core/backing/src/metrics.rs +++ b/node/core/backing/src/metrics.rs @@ -54,7 +54,8 @@ impl Metrics { self.0.as_ref().map(|metrics| metrics.process_statement.start_timer()) } - /// Provide a timer for handling `CandidateBackingMessage::GetBackedCandidates` which observes on drop. + /// Provide a timer for handling `CandidateBackingMessage::GetBackedCandidates` which observes + /// on drop. pub fn time_get_backed_candidates( &self, ) -> Option { diff --git a/node/core/backing/src/tests.rs b/node/core/backing/src/tests.rs index 35c83297fa71..386cc9e2279e 100644 --- a/node/core/backing/src/tests.rs +++ b/node/core/backing/src/tests.rs @@ -84,7 +84,8 @@ impl Default for TestState { ]; let keystore = Arc::new(sc_keystore::LocalKeystore::in_memory()); - // Make sure `Alice` key is in the keystore, so this mocked node will be a parachain validator. + // Make sure `Alice` key is in the keystore, so this mocked node will be a parachain + // validator. 
Keystore::sr25519_generate_new(&*keystore, ValidatorId::ID, Some(&validators[0].to_seed())) .expect("Insert key into keystore"); diff --git a/node/core/bitfield-signing/src/lib.rs b/node/core/bitfield-signing/src/lib.rs index 1e4d556de7ca..f29e827e1090 100644 --- a/node/core/bitfield-signing/src/lib.rs +++ b/node/core/bitfield-signing/src/lib.rs @@ -137,8 +137,8 @@ async fn get_availability_cores( /// - get the list of core states from the runtime /// - for each core, concurrently determine chunk availability (see `get_core_availability`) -/// - return the bitfield if there were no errors at any point in this process -/// (otherwise, it's prone to false negatives) +/// - return the bitfield if there were no errors at any point in this process (otherwise, it's +/// prone to false negatives) async fn construct_availability_bitfield( relay_parent: Hash, span: &jaeger::Span, diff --git a/node/core/candidate-validation/src/lib.rs b/node/core/candidate-validation/src/lib.rs index 93a7e05c8724..f53f2a6aee06 100644 --- a/node/core/candidate-validation/src/lib.rs +++ b/node/core/candidate-validation/src/lib.rs @@ -67,15 +67,15 @@ mod tests; const LOG_TARGET: &'static str = "parachain::candidate-validation"; -/// The amount of time to wait before retrying after a retry-able backing validation error. We use a lower value for the -/// backing case, to fit within the lower backing timeout. +/// The amount of time to wait before retrying after a retry-able backing validation error. We use a +/// lower value for the backing case, to fit within the lower backing timeout. #[cfg(not(test))] const PVF_BACKING_EXECUTION_RETRY_DELAY: Duration = Duration::from_millis(500); #[cfg(test)] const PVF_BACKING_EXECUTION_RETRY_DELAY: Duration = Duration::from_millis(200); -/// The amount of time to wait before retrying after a retry-able approval validation error. 
We use a higher value for -/// the approval case since we have more time, and if we wait longer it is more likely that transient conditions will -/// resolve. +/// The amount of time to wait before retrying after a retry-able approval validation error. We use +/// a higher value for the approval case since we have more time, and if we wait longer it is more +/// likely that transient conditions will resolve. #[cfg(not(test))] const PVF_APPROVAL_EXECUTION_RETRY_DELAY: Duration = Duration::from_secs(3); #[cfg(test)] @@ -451,9 +451,9 @@ where const ASSUMPTIONS: &[OccupiedCoreAssumption] = &[ OccupiedCoreAssumption::Included, OccupiedCoreAssumption::TimedOut, - // `TimedOut` and `Free` both don't perform any speculation and therefore should be the same - // for our purposes here. In other words, if `TimedOut` matched then the `Free` must be - // matched as well. + // `TimedOut` and `Free` both don't perform any speculation and therefore should be the + // same for our purposes here. In other words, if `TimedOut` matched then the `Free` must + // be matched as well. ]; // Consider running these checks in parallel to reduce validation latency. @@ -482,9 +482,10 @@ where AssumptionCheckOutcome::Matches(validation_data, validation_code) => Ok(Some((validation_data, validation_code))), AssumptionCheckOutcome::DoesNotMatch => { - // If neither the assumption of the occupied core having the para included or the assumption - // of the occupied core timing out are valid, then the persisted_validation_data_hash in the descriptor - // is not based on the relay parent and is thus invalid. + // If neither the assumption of the occupied core having the para included or the + // assumption of the occupied core timing out are valid, then the + // persisted_validation_data_hash in the descriptor is not based on the relay parent and + // is thus invalid. 
Ok(None) }, AssumptionCheckOutcome::BadRequest => @@ -704,7 +705,8 @@ where "Invalid candidate (commitments hash)" ); - // If validation produced a new set of commitments, we treat the candidate as invalid. + // If validation produced a new set of commitments, we treat the candidate as + // invalid. Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch)) } else { Ok(ValidationResult::Valid(outputs, persisted_validation_data)) @@ -744,7 +746,8 @@ trait ValidationBackend { prep_timeout, PrepareJobKind::Compilation, ); - // We keep track of the total time that has passed and stop retrying if we are taking too long. + // We keep track of the total time that has passed and stop retrying if we are taking too + // long. let total_time_start = Instant::now(); let mut validation_result = @@ -780,8 +783,8 @@ trait ValidationBackend { _ => break, } - // If we got a possibly transient error, retry once after a brief delay, on the assumption - // that the conditions that caused this error may have resolved on their own. + // If we got a possibly transient error, retry once after a brief delay, on the + // assumption that the conditions that caused this error may have resolved on their own. { // Wait a brief delay before retrying. futures_timer::Delay::new(retry_delay).await; diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index 4b512347dae4..aa5bb9548ad2 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -44,13 +44,15 @@ mod tree; mod tests; const LOG_TARGET: &str = "parachain::chain-selection"; -/// Timestamp based on the 1 Jan 1970 UNIX base, which is persistent across node restarts and OS reboots. +/// Timestamp based on the 1 Jan 1970 UNIX base, which is persistent across node restarts and OS +/// reboots. type Timestamp = u64; // If a block isn't approved in 120 seconds, nodes will abandon it // and begin building on another chain. 
const STAGNANT_TIMEOUT: Timestamp = 120; -// Delay prunning of the stagnant keys in prune only mode by 25 hours to avoid interception with the finality +// Delay pruning of the stagnant keys in prune only mode by 25 hours to avoid interception with the +// finality const STAGNANT_PRUNE_DELAY: Timestamp = 25 * 60 * 60; // Maximum number of stagnant entries cleaned during one `STAGNANT_TIMEOUT` iteration const MAX_STAGNANT_ENTRIES: usize = 1000; diff --git a/node/core/dispute-coordinator/src/db/v1.rs b/node/core/dispute-coordinator/src/db/v1.rs index 2d14f5151003..f0f17d2325d6 100644 --- a/node/core/dispute-coordinator/src/db/v1.rs +++ b/node/core/dispute-coordinator/src/db/v1.rs @@ -52,8 +52,8 @@ const CLEANED_VOTES_WATERMARK_KEY: &[u8; 23] = b"cleaned-votes-watermark"; /// this should not be done at once, but rather in smaller batches so nodes won't get stalled by /// this. /// -/// 300 is with session duration of 1 hour and 30 parachains around <3_000_000 key purges in the worst -/// case. Which is already quite a lot, at the same time we have around 21_000 sessions on +/// 300 is with session duration of 1 hour and 30 parachains around <3_000_000 key purges in the +/// worst case. Which is already quite a lot, at the same time we have around 21_000 sessions on /// Kusama. This means at 300 purged sessions per session, cleaning everything up will take /// around 3 days. Depending on how severe disk usage becomes, we might want to bump the batch /// size, at the cost of risking issues at session boundaries (performance). @@ -346,7 +346,8 @@ pub(crate) fn note_earliest_session( if pruned_disputes.len() != 0 { overlay_db.write_recent_disputes(new_recent_disputes); - // Note: Deleting old candidate votes is handled in `write` based on the earliest session. + // Note: Deleting old candidate votes is handled in `write` based on the + // earliest session.
} } }, diff --git a/node/core/dispute-coordinator/src/import.rs b/node/core/dispute-coordinator/src/import.rs index 912521834075..0da3723ebf22 100644 --- a/node/core/dispute-coordinator/src/import.rs +++ b/node/core/dispute-coordinator/src/import.rs @@ -19,12 +19,12 @@ //! This module encapsulates the actual logic for importing new votes and provides easy access of //! the current state for votes for a particular candidate. //! -//! In particular there is `CandidateVoteState` which tells what can be concluded for a particular set of -//! votes. E.g. whether a dispute is ongoing, whether it is confirmed, concluded, .. +//! In particular there is `CandidateVoteState` which tells what can be concluded for a particular +//! set of votes. E.g. whether a dispute is ongoing, whether it is confirmed, concluded, .. //! //! Then there is `ImportResult` which reveals information about what changed once additional votes -//! got imported on top of an existing `CandidateVoteState` and reveals "dynamic" information, like whether -//! due to the import a dispute was raised/got confirmed, ... +//! got imported on top of an existing `CandidateVoteState` and reveals "dynamic" information, like +//! whether due to the import a dispute was raised/got confirmed, ... use std::collections::{BTreeMap, HashMap, HashSet}; diff --git a/node/core/dispute-coordinator/src/initialized.rs b/node/core/dispute-coordinator/src/initialized.rs index 2a1d8fd4b83c..c1d02ef976cb 100644 --- a/node/core/dispute-coordinator/src/initialized.rs +++ b/node/core/dispute-coordinator/src/initialized.rs @@ -92,8 +92,8 @@ pub struct InitialData { pub(crate) struct Initialized { keystore: Arc, runtime_info: RuntimeInfo, - /// This is the highest `SessionIndex` seen via `ActiveLeavesUpdate`. It doesn't matter if it was - /// cached successfully or not. It is used to detect ancient disputes. + /// This is the highest `SessionIndex` seen via `ActiveLeavesUpdate`. 
It doesn't matter if it + /// was cached successfully or not. It is used to detect ancient disputes. highest_session_seen: SessionIndex, /// Will be set to `true` if an error occured during the last caching attempt gaps_in_cache: bool, @@ -308,8 +308,8 @@ impl Initialized { Ok(session_idx) if self.gaps_in_cache || session_idx > self.highest_session_seen => { - // Fetch the last `DISPUTE_WINDOW` number of sessions unless there are no gaps in - // cache and we are not missing too many `SessionInfo`s + // Fetch the last `DISPUTE_WINDOW` number of sessions unless there are no gaps + // in cache and we are not missing too many `SessionInfo`s let mut lower_bound = session_idx.saturating_sub(DISPUTE_WINDOW.get() - 1); if !self.gaps_in_cache && self.highest_session_seen > lower_bound { lower_bound = self.highest_session_seen + 1 @@ -1133,8 +1133,8 @@ impl Initialized { } // Participate in dispute if we did not cast a vote before and actually have keys to cast a - // local vote. Disputes should fall in one of the categories below, otherwise we will refrain - // from participation: + // local vote. Disputes should fall in one of the categories below, otherwise we will + // refrain from participation: // - `is_included` lands in prioritised queue // - `is_confirmed` | `is_backed` lands in best effort queue // We don't participate in disputes on finalized candidates. diff --git a/node/core/dispute-coordinator/src/lib.rs b/node/core/dispute-coordinator/src/lib.rs index 02bb6ef9ecda..a2c500e08e28 100644 --- a/node/core/dispute-coordinator/src/lib.rs +++ b/node/core/dispute-coordinator/src/lib.rs @@ -17,12 +17,13 @@ //! Implements the dispute coordinator subsystem. //! //! This is the central subsystem of the node-side components which participate in disputes. -//! This subsystem wraps a database which tracks all statements observed by all validators over some window of sessions. -//! Votes older than this session window are pruned. +//! 
This subsystem wraps a database which tracks all statements observed by all validators over some +//! window of sessions. Votes older than this session window are pruned. //! -//! This subsystem will be the point which produce dispute votes, either positive or negative, based on locally-observed -//! validation results as well as a sink for votes received by other subsystems. When importing a dispute vote from -//! another node, this will trigger dispute participation to recover and validate the block. +//! This subsystem will be the point which produces dispute votes, either positive or negative, based +//! on locally-observed validation results as well as a sink for votes received by other subsystems. +//! When importing a dispute vote from another node, this will trigger dispute participation to +//! recover and validate the block. use std::{num::NonZeroUsize, sync::Arc}; @@ -92,10 +93,10 @@ mod spam_slots; /// Handling of participation requests via `Participation`. /// -/// `Participation` provides an API (`Participation::queue_participation`) for queuing of dispute participations and will process those -/// participation requests, such that most important/urgent disputes will be resolved and processed -/// first and more importantly it will order requests in a way so disputes will get resolved, even -/// if there are lots of them. +/// `Participation` provides an API (`Participation::queue_participation`) for queuing of dispute +/// participations and will process those participation requests, such that most important/urgent +/// disputes will be resolved and processed first and more importantly it will order requests in a +/// way so disputes will get resolved, even if there are lots of them. pub(crate) mod participation; /// Pure processing of vote imports.
diff --git a/node/core/dispute-coordinator/src/participation/queues/mod.rs b/node/core/dispute-coordinator/src/participation/queues/mod.rs index 4d8ee585ea29..8a4374999f88 100644 --- a/node/core/dispute-coordinator/src/participation/queues/mod.rs +++ b/node/core/dispute-coordinator/src/participation/queues/mod.rs @@ -294,8 +294,8 @@ impl Queues { return Self::pop_impl(&mut self.priority) } - // `pop_best_effort` and `pop_priority` do the same but on different `BTreeMap`s. This function has - // the extracted implementation + // `pop_best_effort` and `pop_priority` do the same but on different `BTreeMap`s. This function + // has the extracted implementation fn pop_impl( target: &mut BTreeMap, ) -> Option<(CandidateComparator, ParticipationRequest)> { @@ -331,9 +331,10 @@ impl Queues { #[derive(Copy, Clone)] #[cfg_attr(test, derive(Debug))] struct CandidateComparator { - /// Block number of the relay parent. It's wrapped in an `Option<>` because there are cases when - /// it can't be obtained. For example when the node is lagging behind and new leaves are received - /// with a slight delay. Candidates with unknown relay parent are treated with the lowest priority. + /// Block number of the relay parent. It's wrapped in an `Option<>` because there are cases + /// when it can't be obtained. For example when the node is lagging behind and new leaves are + /// received with a slight delay. Candidates with unknown relay parent are treated with the + /// lowest priority. /// /// The order enforced by `CandidateComparator` is important because we want to participate in /// the oldest disputes first. @@ -346,9 +347,10 @@ struct CandidateComparator { /// that is not stable. If a new fork appears after the fact, we would start ordering the same /// candidate differently, which would result in the same candidate getting queued twice. 
relay_parent_block_number: Option, - /// By adding the `CandidateHash`, we can guarantee a unique ordering across candidates with the - /// same relay parent block number. Candidates without `relay_parent_block_number` are ordered by - /// the `candidate_hash` (and treated with the lowest priority, as already mentioned). + /// By adding the `CandidateHash`, we can guarantee a unique ordering across candidates with + /// the same relay parent block number. Candidates without `relay_parent_block_number` are + /// ordered by the `candidate_hash` (and treated with the lowest priority, as already + /// mentioned). candidate_hash: CandidateHash, } @@ -364,11 +366,11 @@ impl CandidateComparator { /// Create a candidate comparator for a given candidate. /// /// Returns: - /// - `Ok(CandidateComparator{Some(relay_parent_block_number), candidate_hash})` when the + /// - `Ok(CandidateComparator{Some(relay_parent_block_number), candidate_hash})` when the /// relay parent can be obtained. This is the happy case. /// - `Ok(CandidateComparator{None, candidate_hash})` in case the candidate's relay parent /// can't be obtained. - /// - `FatalError` in case the chain API call fails with an unexpected error. + /// - `FatalError` in case the chain API call fails with an unexpected error. pub async fn new( sender: &mut impl overseer::DisputeCoordinatorSenderTrait, candidate: &CandidateReceipt, diff --git a/node/core/dispute-coordinator/src/participation/queues/tests.rs b/node/core/dispute-coordinator/src/participation/queues/tests.rs index 8293a935d11a..5e262d895e31 100644 --- a/node/core/dispute-coordinator/src/participation/queues/tests.rs +++ b/node/core/dispute-coordinator/src/participation/queues/tests.rs @@ -53,8 +53,8 @@ fn clone_request(request: &ParticipationRequest) -> ParticipationRequest { /// Check that dequeuing acknowledges order. 
/// /// Any priority item will be dequeued before any best effort items, priority and best effort with -/// known parent block number items will be processed in order. Best effort items without known parent -/// block number should be treated with lowest priority. +/// known parent block number items will be processed in order. Best effort items without known +/// parent block number should be treated with lowest priority. #[test] fn ordering_works_as_expected() { let metrics = Metrics::default(); diff --git a/node/core/dispute-coordinator/src/participation/tests.rs b/node/core/dispute-coordinator/src/participation/tests.rs index ab58db4e7628..32725a3ac658 100644 --- a/node/core/dispute-coordinator/src/participation/tests.rs +++ b/node/core/dispute-coordinator/src/participation/tests.rs @@ -305,7 +305,8 @@ fn reqs_get_queued_on_no_recent_block() { // Responds to messages from the test and verifies its behaviour let request_handler = async { - // If we receive `BlockNumber` request this implicitly proves that the participation is queued + // If we receive `BlockNumber` request this implicitly proves that the participation is + // queued assert_matches!( ctx_handle.recv().await, AllMessages::ChainApi(ChainApiMessage::BlockNumber(_, tx)) => { diff --git a/node/core/dispute-coordinator/src/scraping/candidates.rs b/node/core/dispute-coordinator/src/scraping/candidates.rs index 89323907a732..38956700545c 100644 --- a/node/core/dispute-coordinator/src/scraping/candidates.rs +++ b/node/core/dispute-coordinator/src/scraping/candidates.rs @@ -98,7 +98,8 @@ mod ref_counted_candidates_tests { /// Keeps track of scraped candidates. Supports `insert`, `remove_up_to_height` and `contains` /// operations. pub struct ScrapedCandidates { - /// Main data structure which keeps the candidates we know about. `contains` does lookups only here. + /// Main data structure which keeps the candidates we know about. `contains` does lookups only + /// here. 
candidates: RefCountedCandidates, /// Keeps track at which block number a candidate was inserted. Used in `remove_up_to_height`. /// Without this tracking we won't be able to remove all candidates before block X. @@ -117,7 +118,8 @@ impl ScrapedCandidates { self.candidates.contains(candidate_hash) } - // Removes all candidates up to a given height. The candidates at the block height are NOT removed. + // Removes all candidates up to a given height. The candidates at the block height are NOT + // removed. pub fn remove_up_to_height(&mut self, height: &BlockNumber) -> HashSet { let mut candidates_modified: HashSet = HashSet::new(); let not_stale = self.candidates_by_block_number.split_off(&height); diff --git a/node/core/dispute-coordinator/src/scraping/mod.rs b/node/core/dispute-coordinator/src/scraping/mod.rs index a1e385b5ff85..f93ad0abab91 100644 --- a/node/core/dispute-coordinator/src/scraping/mod.rs +++ b/node/core/dispute-coordinator/src/scraping/mod.rs @@ -120,7 +120,8 @@ impl Inclusions { ) { for candidate in candidates_modified { if let Some(blocks_including) = self.inclusions_inner.get_mut(&candidate) { - // Returns everything after the given key, including the key. This works because the blocks are sorted in ascending order. + // Returns everything after the given key, including the key. This works because the + // blocks are sorted in ascending order. *blocks_including = blocks_including.split_off(height); } } @@ -150,8 +151,8 @@ impl Inclusions { /// /// Concretely: /// -/// - Monitors for `CandidateIncluded` events to keep track of candidates that have been -/// included on chains. +/// - Monitors for `CandidateIncluded` events to keep track of candidates that have been included on +/// chains. /// - Monitors for `CandidateBacked` events to keep track of all backed candidates. /// - Calls `FetchOnChainVotes` for each block to gather potentially missed votes from chain. /// @@ -294,11 +295,11 @@ impl ChainScraper { /// Prune finalized candidates. 
/// - /// We keep each candidate for `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION` blocks after finalization. - /// After that we treat it as low priority. + /// We keep each candidate for `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION` blocks after + /// finalization. After that we treat it as low priority. pub fn process_finalized_block(&mut self, finalized_block_number: &BlockNumber) { - // `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION - 1` because `finalized_block_number`counts to the - // candidate lifetime. + // `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION - 1` because + // `finalized_block_number` counts to the candidate lifetime. match finalized_block_number.checked_sub(DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION - 1) { Some(key_to_prune) => { diff --git a/node/core/dispute-coordinator/src/scraping/tests.rs b/node/core/dispute-coordinator/src/scraping/tests.rs index 57e0731056b7..d938304a9e97 100644 --- a/node/core/dispute-coordinator/src/scraping/tests.rs +++ b/node/core/dispute-coordinator/src/scraping/tests.rs @@ -183,7 +183,8 @@ fn get_backed_candidate_event(block_number: BlockNumber) -> Vec GroupIndex::from(0), )] } -/// Hash for a 'magic' candidate. This is meant to be a special candidate used to verify special cases. +/// Hash for a 'magic' candidate. This is meant to be a special candidate used to verify special +/// cases. fn get_magic_candidate_hash() -> Hash { BlakeTwo256::hash(&"abc".encode()) } @@ -425,7 +426,7 @@ fn scraper_requests_candidates_of_non_finalized_ancestors() { &chain, finalized_block_number, BLOCKS_TO_SKIP - - (finalized_block_number - DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION) as usize, // Expect the provider not to go past finalized block. + (finalized_block_number - DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION) as usize, /* Expect the provider not to go past finalized block. 
*/ get_backed_and_included_candidate_events, ); join(process_active_leaves_update(ctx.sender(), &mut ordering, next_update), overseer_fut) @@ -468,7 +469,8 @@ fn scraper_prunes_finalized_candidates() { let candidate = make_candidate_receipt(get_block_number_hash(TEST_TARGET_BLOCK_NUMBER)); - // After `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION` blocks the candidate should be removed + // After `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION` blocks the candidate should be + // removed finalized_block_number = TEST_TARGET_BLOCK_NUMBER + DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION; process_finalized_block(&mut scraper, &finalized_block_number); @@ -518,8 +520,9 @@ fn scraper_handles_backed_but_not_included_candidate() { finalized_block_number += 1; process_finalized_block(&mut scraper, &finalized_block_number); - // `FIRST_TEST_BLOCK` is finalized, which is within `BACKED_CANDIDATE_LIFETIME_AFTER_FINALIZATION` window. - // The candidate should still be backed. + // `FIRST_TEST_BLOCK` is finalized, which is within + // `BACKED_CANDIDATE_LIFETIME_AFTER_FINALIZATION` window. The candidate should still be + // backed. let candidate = make_candidate_receipt(get_block_number_hash(TEST_TARGET_BLOCK_NUMBER)); assert!(!scraper.is_candidate_included(&candidate.hash())); assert!(scraper.is_candidate_backed(&candidate.hash())); @@ -576,7 +579,8 @@ fn scraper_handles_the_same_candidate_incuded_in_two_different_block_heights() { .await; // Finalize blocks to enforce pruning of scraped events. - // The magic candidate was added twice, so it shouldn't be removed if we finalize two more blocks. + // The magic candidate was added twice, so it shouldn't be removed if we finalize two more + // blocks. 
finalized_block_number = test_targets.first().expect("there are two block nums") + DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION; process_finalized_block(&mut scraper, &finalized_block_number); @@ -641,7 +645,8 @@ fn inclusions_per_candidate_properly_adds_and_prunes() { ]) ); - // After `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION` blocks the earlier inclusion should be removed + // After `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION` blocks the earlier inclusion should + // be removed finalized_block_number = TEST_TARGET_BLOCK_NUMBER + DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION; process_finalized_block(&mut scraper, &finalized_block_number); diff --git a/node/core/dispute-coordinator/src/tests.rs b/node/core/dispute-coordinator/src/tests.rs index f2590aea1511..d0cf494d2d4d 100644 --- a/node/core/dispute-coordinator/src/tests.rs +++ b/node/core/dispute-coordinator/src/tests.rs @@ -734,8 +734,9 @@ fn too_many_unconfirmed_statements_are_considered_spam() { .await; // Participation has to fail here, otherwise the dispute will be confirmed. However - // participation won't happen at all because the dispute is neither backed, not confirmed - // nor the candidate is included. Or in other words - we'll refrain from participation. + // participation won't happen at all because the dispute is neither backed, nor + // confirmed nor the candidate is included. Or in other words - we'll refrain from + // participation. { let (tx, rx) = oneshot::channel(); @@ -2050,7 +2051,8 @@ fn concluded_supermajority_against_non_active_after_time() { ImportStatementsResult::ValidImport => {} ); - // Use a different expected commitments hash to ensure the candidate validation returns invalid. + // Use a different expected commitments hash to ensure the candidate validation returns + // invalid. 
participation_with_distribution( &mut virtual_overseer, &candidate_hash, @@ -2351,7 +2353,8 @@ fn resume_dispute_with_local_statement() { assert_eq!(messages.len(), 1, "A message should have gone out."); - // Assert that subsystem is not sending Participation messages because we issued a local statement + // Assert that subsystem is not sending Participation messages because we issued a local + // statement assert!(virtual_overseer.recv().timeout(TEST_TIMEOUT).await.is_none()); virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; @@ -2445,7 +2448,8 @@ fn resume_dispute_without_local_statement_or_local_key() { Box::pin(async move { test_state.handle_resume_sync(&mut virtual_overseer, session).await; - // Assert that subsystem is not sending Participation messages because we issued a local statement + // Assert that subsystem is not sending Participation messages because we issued a + // local statement assert!(virtual_overseer.recv().timeout(TEST_TIMEOUT).await.is_none()); virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; @@ -2751,7 +2755,8 @@ fn redundant_votes_ignored() { } #[test] -/// Make sure no disputes are recorded when there are no opposing votes, even if we reached supermajority. +/// Make sure no disputes are recorded when there are no opposing votes, even if we reached +/// supermajority. fn no_onesided_disputes() { test_harness(|mut test_state, mut virtual_overseer| { Box::pin(async move { @@ -3124,16 +3129,17 @@ fn participation_requests_reprioritized_for_newly_included() { candidate_receipt.descriptor.pov_hash = Hash::from( [repetition; 32], // Altering this receipt so its hash will be changed ); - // Set consecutive parents (starting from zero). They will order the candidates for participation. + // Set consecutive parents (starting from zero). They will order the candidates for + // participation. 
let parent_block_num: BlockNumber = repetition as BlockNumber - 1; candidate_receipt.descriptor.relay_parent = test_state.block_num_to_header.get(&parent_block_num).unwrap().clone(); receipts.push(candidate_receipt.clone()); } - // Mark all candidates as backed, so their participation requests make it to best effort. - // These calls must all occur before including the candidates due to test overseer - // oddities. + // Mark all candidates as backed, so their participation requests make it to best + // effort. These calls must all occur before including the candidates due to test + // overseer oddities. let mut candidate_events = Vec::new(); for r in receipts.iter() { candidate_events.push(make_candidate_backed_event(r.clone())) @@ -3172,7 +3178,8 @@ fn participation_requests_reprioritized_for_newly_included() { .await; // Handle corresponding messages to unblock import - // we need to handle `ApprovalVotingMessage::GetApprovalSignaturesForCandidate` for import + // we need to handle `ApprovalVotingMessage::GetApprovalSignaturesForCandidate` for + // import handle_approval_vote_request( &mut virtual_overseer, &candidate_hash, @@ -3180,8 +3187,9 @@ fn participation_requests_reprioritized_for_newly_included() { ) .await; - // We'll trigger participation for the first `MAX_PARALLEL_PARTICIPATIONS` candidates. - // The rest will be queued => we need to handle `ChainApiMessage::BlockNumber` for them. + // We'll trigger participation for the first `MAX_PARALLEL_PARTICIPATIONS` + // candidates. The rest will be queued => we need to handle + // `ChainApiMessage::BlockNumber` for them. if idx >= crate::participation::MAX_PARALLEL_PARTICIPATIONS { // We send the `idx` as parent block number, because it is used for ordering. // This way we get predictable ordering and participation. @@ -3201,11 +3209,13 @@ fn participation_requests_reprioritized_for_newly_included() { ) .await; - // NB: The checks below are a bit racy. 
In theory candidate 2 can be processed even before candidate 0 and this is okay. If any - // of the asserts in the two functions after this comment fail -> rework `participation_with_distribution` to expect a set of + // NB: The checks below are a bit racy. In theory candidate 2 can be processed even + // before candidate 0 and this is okay. If any of the asserts in the two functions after + // this comment fail -> rework `participation_with_distribution` to expect a set of // commitment hashes instead of just one. - // This is the candidate for which participation was started initially (`MAX_PARALLEL_PARTICIPATIONS` threshold was not yet hit) + // This is the candidate for which participation was started initially + // (`MAX_PARALLEL_PARTICIPATIONS` threshold was not yet hit) participation_with_distribution( &mut virtual_overseer, &receipts.get(0).expect("There is more than one candidate").hash(), @@ -3326,7 +3336,8 @@ fn informs_chain_selection_when_dispute_concluded_against() { ImportStatementsResult::ValidImport => {} ); - // Use a different expected commitments hash to ensure the candidate validation returns invalid. + // Use a different expected commitments hash to ensure the candidate validation returns + // invalid. participation_with_distribution( &mut virtual_overseer, &candidate_hash, @@ -3440,7 +3451,8 @@ fn session_info_is_requested_only_once() { test_state.handle_resume_sync(&mut virtual_overseer, session).await; - // This leaf activation shouldn't fetch `SessionInfo` because the session is already cached + // This leaf activation shouldn't fetch `SessionInfo` because the session is already + // cached test_state .activate_leaf_at_session( &mut virtual_overseer, @@ -3475,8 +3487,8 @@ fn session_info_is_requested_only_once() { }); } -// Big jump means the new session we see with a leaf update is at least a `DISPUTE_WINDOW` bigger than -// the already known one. In this case The whole `DISPUTE_WINDOW` should be fetched. 
+// Big jump means the new session we see with a leaf update is at least a `DISPUTE_WINDOW` bigger +// than the already known one. In this case The whole `DISPUTE_WINDOW` should be fetched. #[test] fn session_info_big_jump_works() { test_harness(|mut test_state, mut virtual_overseer| { @@ -3485,7 +3497,8 @@ fn session_info_big_jump_works() { test_state.handle_resume_sync(&mut virtual_overseer, session_on_startup).await; - // This leaf activation shouldn't fetch `SessionInfo` because the session is already cached + // This leaf activation shouldn't fetch `SessionInfo` because the session is already + // cached test_state .activate_leaf_at_session( &mut virtual_overseer, @@ -3525,8 +3538,8 @@ fn session_info_big_jump_works() { }); } -// Small jump means the new session we see with a leaf update is at less than last known one + `DISPUTE_WINDOW`. In this -// case fetching should start from last known one + 1. +// Small jump means the new session we see with a leaf update is at less than last known one + +// `DISPUTE_WINDOW`. In this case fetching should start from last known one + 1. #[test] fn session_info_small_jump_works() { test_harness(|mut test_state, mut virtual_overseer| { @@ -3535,7 +3548,8 @@ fn session_info_small_jump_works() { test_state.handle_resume_sync(&mut virtual_overseer, session_on_startup).await; - // This leaf activation shouldn't fetch `SessionInfo` because the session is already cached + // This leaf activation shouldn't fetch `SessionInfo` because the session is already + // cached test_state .activate_leaf_at_session( &mut virtual_overseer, diff --git a/node/core/parachains-inherent/src/lib.rs b/node/core/parachains-inherent/src/lib.rs index f27481ee5a7d..3063147fb136 100644 --- a/node/core/parachains-inherent/src/lib.rs +++ b/node/core/parachains-inherent/src/lib.rs @@ -16,11 +16,12 @@ //! The parachain inherent data provider //! -//! Parachain backing and approval is an off-chain process, but the parachain needs to progress on chain as well. 
To -//! make it progress on chain a block producer needs to forward information about the state of a parachain to the -//! runtime. This information is forwarded through an inherent to the runtime. Here we provide the -//! [`ParachainInherentDataProvider`] that requests the relevant data from the provisioner subsystem and creates the -//! the inherent data that the runtime will use to create an inherent. +//! Parachain backing and approval is an off-chain process, but the parachain needs to progress on +//! chain as well. To make it progress on chain a block producer needs to forward information about +//! the state of a parachain to the runtime. This information is forwarded through an inherent to +//! the runtime. Here we provide the [`ParachainInherentDataProvider`] that requests the relevant +//! data from the provisioner subsystem and creates the inherent data that the runtime will use +//! to create an inherent. #![deny(unused_crate_dependencies, unused_results)] diff --git a/node/core/provisioner/src/disputes/mod.rs b/node/core/provisioner/src/disputes/mod.rs index fab70a054698..2d8f6fb6e93b 100644 --- a/node/core/provisioner/src/disputes/mod.rs +++ b/node/core/provisioner/src/disputes/mod.rs @@ -14,7 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! The disputes module is responsible for selecting dispute votes to be sent with the inherent data. +//! The disputes module is responsible for selecting dispute votes to be sent with the inherent +//! data. use crate::LOG_TARGET; use futures::channel::oneshot; @@ -22,7 +23,8 @@ use polkadot_node_primitives::CandidateVotes; use polkadot_node_subsystem::{messages::DisputeCoordinatorMessage, overseer}; use polkadot_primitives::{CandidateHash, SessionIndex}; -/// Request the relevant dispute statements for a set of disputes identified by `CandidateHash` and the `SessionIndex`. 
+/// Request the relevant dispute statements for a set of disputes identified by `CandidateHash` and +/// the `SessionIndex`. async fn request_votes( sender: &mut impl overseer::ProvisionerSenderTrait, disputes_to_query: Vec<(SessionIndex, CandidateHash)>, diff --git a/node/core/provisioner/src/disputes/prioritized_selection/mod.rs b/node/core/provisioner/src/disputes/prioritized_selection/mod.rs index 5c8aaad422f2..096b73d271a8 100644 --- a/node/core/provisioner/src/disputes/prioritized_selection/mod.rs +++ b/node/core/provisioner/src/disputes/prioritized_selection/mod.rs @@ -48,7 +48,8 @@ pub const MAX_DISPUTE_VOTES_FORWARDED_TO_RUNTIME: usize = 200; /// Controls how much dispute votes to be fetched from the `dispute-coordinator` per iteration in /// `fn vote_selection`. The purpose is to fetch the votes in batches until /// `MAX_DISPUTE_VOTES_FORWARDED_TO_RUNTIME` is reached. If all votes are fetched in single call -/// we might fetch votes which we never use. This will create unnecessary load on `dispute-coordinator`. +/// we might fetch votes which we never use. This will create unnecessary load on +/// `dispute-coordinator`. /// /// This value should be less than `MAX_DISPUTE_VOTES_FORWARDED_TO_RUNTIME`. Increase it in case /// `provisioner` sends too many `QueryCandidateVotes` messages to `dispite-coordinator`. @@ -68,22 +69,23 @@ const VOTES_SELECTION_BATCH_SIZE: usize = 11; /// * Offchain vs Onchain /// * Concluded onchain vs Unconcluded onchain /// -/// Provisioner fetches all disputes from `dispute-coordinator` and separates them in multiple partitions. -/// Please refer to `struct PartitionedDisputes` for details about the actual partitions. -/// Each partition has got a priority implicitly assigned to it and the disputes are selected based on this -/// priority (e.g. disputes in partition 1, then if there is space - disputes from partition 2 and so on). 
+/// Provisioner fetches all disputes from `dispute-coordinator` and separates them in multiple +/// partitions. Please refer to `struct PartitionedDisputes` for details about the actual +/// partitions. Each partition has got a priority implicitly assigned to it and the disputes are +/// selected based on this priority (e.g. disputes in partition 1, then if there is space - disputes +/// from partition 2 and so on). /// /// # Votes selection /// -/// Besides the prioritization described above the votes in each partition are filtered too. Provisioner -/// fetches all onchain votes and filters them out from all partitions. As a result the Runtime receives -/// only fresh votes (votes it didn't know about). +/// Besides the prioritization described above the votes in each partition are filtered too. +/// Provisioner fetches all onchain votes and filters them out from all partitions. As a result the +/// Runtime receives only fresh votes (votes it didn't know about). /// /// # How the onchain votes are fetched /// -/// The logic outlined above relies on `RuntimeApiRequest::Disputes` message from the Runtime. The user -/// check the Runtime version before calling `select_disputes`. If the function is used with old runtime -/// an error is logged and the logic will continue with empty onchain votes `HashMap`. +/// The logic outlined above relies on `RuntimeApiRequest::Disputes` message from the Runtime. The +/// user check the Runtime version before calling `select_disputes`. If the function is used with +/// old runtime an error is logged and the logic will continue with empty onchain votes `HashMap`. pub async fn select_disputes( sender: &mut Sender, metrics: &metrics::Metrics, @@ -110,7 +112,8 @@ where r }, Err(GetOnchainDisputesError::NotSupported(runtime_api_err, relay_parent)) => { - // Runtime version is checked before calling this method, so the error below should never happen! 
+ // Runtime version is checked before calling this method, so the error below should + // never happen! gum::error!( target: LOG_TARGET, ?runtime_api_err, @@ -152,7 +155,8 @@ where gum::trace!(target: LOG_TARGET, ?leaf, "Filtering recent disputes"); // Filter out unconfirmed disputes. However if the dispute is already onchain - don't skip it. - // In this case we'd better push as much fresh votes as possible to bring it to conclusion faster. + // In this case we'd better push as much fresh votes as possible to bring it to conclusion + // faster. let recent_disputes = recent_disputes .into_iter() .filter(|d| d.2.is_confirmed_concluded() || onchain.contains_key(&(d.0, d.1))) @@ -178,9 +182,9 @@ where make_multi_dispute_statement_set(metrics, result) } -/// Selects dispute votes from `PartitionedDisputes` which should be sent to the runtime. Votes which -/// are already onchain are filtered out. Result should be sorted by `(SessionIndex, CandidateHash)` -/// which is enforced by the `BTreeMap`. This is a requirement from the runtime. +/// Selects dispute votes from `PartitionedDisputes` which should be sent to the runtime. Votes +/// which are already onchain are filtered out. Result should be sorted by `(SessionIndex, +/// CandidateHash)` which is enforced by the `BTreeMap`. This is a requirement from the runtime. async fn vote_selection( sender: &mut Sender, partitioned: PartitionedDisputes, @@ -237,9 +241,9 @@ where for (session_index, candidate_hash, selected_votes) in votes { let votes_len = selected_votes.valid.raw().len() + selected_votes.invalid.len(); if votes_len + total_votes_len > MAX_DISPUTE_VOTES_FORWARDED_TO_RUNTIME { - // we are done - no more votes can be added. Importantly, we don't add any votes for a dispute here - // if we can't fit them all. This gives us an important invariant, that backing votes for - // disputes make it into the provisioned vote set. + // we are done - no more votes can be added. 
Importantly, we don't add any votes for + // a dispute here if we can't fit them all. This gives us an important invariant, + // that backing votes for disputes make it into the provisioned vote set. gum::trace!( target: LOG_TARGET, ?request_votes_counter, @@ -483,7 +487,8 @@ fn make_multi_dispute_statement_set( .collect() } -/// Gets the on-chain disputes at a given block number and returns them as a `HashMap` so that searching in them is cheap. +/// Gets the on-chain disputes at a given block number and returns them as a `HashMap` so that +/// searching in them is cheap. pub async fn get_onchain_disputes( sender: &mut Sender, relay_parent: Hash, diff --git a/node/core/provisioner/src/disputes/prioritized_selection/tests.rs b/node/core/provisioner/src/disputes/prioritized_selection/tests.rs index 4ae67e3b7968..7798ebe51aaf 100644 --- a/node/core/provisioner/src/disputes/prioritized_selection/tests.rs +++ b/node/core/provisioner/src/disputes/prioritized_selection/tests.rs @@ -237,21 +237,22 @@ fn partitioning_happy_case() { ); } -// This test verifies the double voting behavior. Currently we don't care if a supermajority is achieved with or -// without the 'help' of a double vote (a validator voting for and against at the same time). This makes the test -// a bit pointless but anyway I'm leaving it here to make this decision explicit and have the test code ready in -// case this behavior needs to be further tested in the future. -// Link to the PR with the discussions: https://github.com/paritytech/polkadot/pull/5567 +// This test verifies the double voting behavior. Currently we don't care if a supermajority is +// achieved with or without the 'help' of a double vote (a validator voting for and against at the +// same time). This makes the test a bit pointless but anyway I'm leaving it here to make this +// decision explicit and have the test code ready in case this behavior needs to be further tested +// in the future. 
Link to the PR with the discussions: https://github.com/paritytech/polkadot/pull/5567 #[test] fn partitioning_doubled_onchain_vote() { let mut input = Vec::<(SessionIndex, CandidateHash, DisputeStatus)>::new(); let mut onchain = HashMap::<(u32, CandidateHash), DisputeState>::new(); - // Dispute A relies on a 'double onchain vote' to conclude. Validator with index 0 has voted both `for` and `against`. - // Despite that this dispute should be considered 'can conclude onchain'. + // Dispute A relies on a 'double onchain vote' to conclude. Validator with index 0 has voted + // both `for` and `against`. Despite that this dispute should be considered 'can conclude + // onchain'. let dispute_a = (3, CandidateHash(Hash::random()), DisputeStatus::Active); - // Dispute B has supermajority + 1 votes, so the doubled onchain vote doesn't affect it. It should be considered - // as 'can conclude onchain'. + // Dispute B has supermajority + 1 votes, so the doubled onchain vote doesn't affect it. It + // should be considered as 'can conclude onchain'. 
let dispute_b = (4, CandidateHash(Hash::random()), DisputeStatus::Active); input.push(dispute_a.clone()); input.push(dispute_b.clone()); diff --git a/node/core/provisioner/src/error.rs b/node/core/provisioner/src/error.rs index 0f1747995843..5645ed2762bc 100644 --- a/node/core/provisioner/src/error.rs +++ b/node/core/provisioner/src/error.rs @@ -81,7 +81,8 @@ pub enum Error { OverseerExited(SubsystemError), } -/// Used by `get_onchain_disputes` to represent errors related to fetching on-chain disputes from the Runtime +/// Used by `get_onchain_disputes` to represent errors related to fetching on-chain disputes from +/// the Runtime #[allow(dead_code)] // Remove when promoting to stable #[fatality::fatality] pub enum GetOnchainDisputesError { diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs index 3ae297fee736..b5073763dfab 100644 --- a/node/core/provisioner/src/lib.rs +++ b/node/core/provisioner/src/lib.rs @@ -466,11 +466,11 @@ async fn send_inherent_data( /// - not more than one per validator /// - each 1 bit must correspond to an occupied core /// -/// If we have too many, an arbitrary selection policy is fine. For purposes of maximizing availability, -/// we pick the one with the greatest number of 1 bits. +/// If we have too many, an arbitrary selection policy is fine. For purposes of maximizing +/// availability, we pick the one with the greatest number of 1 bits. /// -/// Note: This does not enforce any sorting precondition on the output; the ordering there will be unrelated -/// to the sorting of the input. +/// Note: This does not enforce any sorting precondition on the output; the ordering there will be +/// unrelated to the sorting of the input. 
fn select_availability_bitfields( cores: &[CoreState], bitfields: &[SignedAvailabilityBitfield], @@ -532,7 +532,8 @@ fn select_availability_bitfields( selected.into_values().collect() } -/// Determine which cores are free, and then to the degree possible, pick a candidate appropriate to each free core. +/// Determine which cores are free, and then to the degree possible, pick a candidate appropriate to +/// each free core. async fn select_candidates( availability_cores: &[CoreState], bitfields: &[SignedAvailabilityBitfield], @@ -593,7 +594,8 @@ async fn select_candidates( let computed_validation_data_hash = validation_data.hash(); - // we arbitrarily pick the first of the backed candidates which match the appropriate selection criteria + // we arbitrarily pick the first of the backed candidates which match the appropriate + // selection criteria if let Some(candidate) = candidates.iter().find(|backed_candidate| { let descriptor = &backed_candidate.descriptor; descriptor.para_id == scheduled_core.para_id && @@ -628,12 +630,12 @@ async fn select_candidates( gum::trace!(target: LOG_TARGET, leaf_hash=?relay_parent, "Got {} backed candidates", candidates.len()); - // `selected_candidates` is generated in ascending order by core index, and `GetBackedCandidates` - // _should_ preserve that property, but let's just make sure. + // `selected_candidates` is generated in ascending order by core index, and + // `GetBackedCandidates` _should_ preserve that property, but let's just make sure. // - // We can't easily map from `BackedCandidate` to `core_idx`, but we know that every selected candidate - // maps to either 0 or 1 backed candidate, and the hashes correspond. Therefore, by checking them - // in order, we can ensure that the backed candidates are also in order. + // We can't easily map from `BackedCandidate` to `core_idx`, but we know that every selected + // candidate maps to either 0 or 1 backed candidate, and the hashes correspond. 
Therefore, by + // checking them in order, we can ensure that the backed candidates are also in order. let mut backed_idx = 0; for selected in selected_candidates { if selected == @@ -705,8 +707,9 @@ fn bitfields_indicate_availability( let validator_idx = bitfield.validator_index().0 as usize; match availability.get_mut(validator_idx) { None => { - // in principle, this function might return a `Result` so that we can more clearly express this error condition - // however, in practice, that would just push off an error-handling routine which would look a whole lot like this one. + // in principle, this function might return a `Result` so that we can + // more clearly express this error condition however, in practice, that would just + // push off an error-handling routine which would look a whole lot like this one. // simpler to just handle the error internally here. gum::warn!( target: LOG_TARGET, @@ -726,8 +729,8 @@ fn bitfields_indicate_availability( 3 * availability.count_ones() >= 2 * availability.len() } -// If we have to be absolutely precise here, this method gets the version of the `ParachainHost` api. -// For brevity we'll just call it 'runtime version'. +// If we have to be absolutely precise here, this method gets the version of the `ParachainHost` +// api. For brevity we'll just call it 'runtime version'. async fn has_required_runtime( sender: &mut impl overseer::ProvisionerSenderTrait, relay_parent: Hash, diff --git a/node/core/provisioner/src/metrics.rs b/node/core/provisioner/src/metrics.rs index c65d999d04a7..fabbd798cf02 100644 --- a/node/core/provisioner/src/metrics.rs +++ b/node/core/provisioner/src/metrics.rs @@ -28,9 +28,10 @@ struct MetricsInner { /// Bitfields array length in `ProvisionerInherentData` (the result for `RequestInherentData`) inherent_data_response_bitfields: prometheus::Histogram, - /// The following metrics track how many disputes/votes the runtime will have to process. 
These will count - /// all recent statements meaning every dispute from last sessions: 10 min on Rococo, 60 min on Kusama and - /// 4 hours on Polkadot. The metrics are updated only when the node authors a block, so values vary across nodes. + /// The following metrics track how many disputes/votes the runtime will have to process. These + /// will count all recent statements meaning every dispute from last sessions: 10 min on + /// Rococo, 60 min on Kusama and 4 hours on Polkadot. The metrics are updated only when the + /// node authors a block, so values vary across nodes. inherent_data_dispute_statement_sets: prometheus::Counter, inherent_data_dispute_statements: prometheus::CounterVec, diff --git a/node/core/provisioner/src/tests.rs b/node/core/provisioner/src/tests.rs index e8692df8543a..4a469a43c893 100644 --- a/node/core/provisioner/src/tests.rs +++ b/node/core/provisioner/src/tests.rs @@ -90,7 +90,8 @@ mod select_availability_bitfields { let cores = vec![occupied_core(0), occupied_core(1)]; // we pass in three bitfields with two validators - // this helps us check the postcondition that we get two bitfields back, for which the validators differ + // this helps us check the postcondition that we get two bitfields back, for which the + // validators differ let bitfields = vec![ signed_bitfield(&keystore, bitvec.clone(), ValidatorIndex(0)), signed_bitfield(&keystore, bitvec.clone(), ValidatorIndex(1)), diff --git a/node/core/pvf-checker/src/lib.rs b/node/core/pvf-checker/src/lib.rs index 222e85e36542..2946f3f78861 100644 --- a/node/core/pvf-checker/src/lib.rs +++ b/node/core/pvf-checker/src/lib.rs @@ -110,8 +110,8 @@ struct State { /// /// Here are some fun facts about these futures: /// - /// - Pre-checking can take quite some time, in the matter of tens of seconds, so the futures here - /// can soak for quite some time. + /// - Pre-checking can take quite some time, in the matter of tens of seconds, so the futures + /// here can soak for quite some time. 
/// - Pre-checking of one PVF can take drastically more time than pre-checking of another PVF. /// This leads to results coming out of order. /// diff --git a/node/core/pvf-checker/src/tests.rs b/node/core/pvf-checker/src/tests.rs index 46e760936144..b223b1b54c0b 100644 --- a/node/core/pvf-checker/src/tests.rs +++ b/node/core/pvf-checker/src/tests.rs @@ -110,8 +110,8 @@ impl TestState { Self { leaves, sessions, last_session_index } } - /// A convenience function to receive a message from the overseer and returning `None` if nothing - /// was received within a reasonable (for local tests anyway) timeout. + /// A convenience function to receive a message from the overseer and returning `None` if + /// nothing was received within a reasonable (for local tests anyway) timeout. async fn recv_timeout(&mut self, handle: &mut VirtualOverseer) -> Option { futures::select! { msg = handle.recv().fuse() => { diff --git a/node/core/pvf/common/src/error.rs b/node/core/pvf/common/src/error.rs index 64d17800ac10..6eb0d9b7df42 100644 --- a/node/core/pvf/common/src/error.rs +++ b/node/core/pvf/common/src/error.rs @@ -18,8 +18,8 @@ use crate::prepare::PrepareStats; use parity_scale_codec::{Decode, Encode}; use std::fmt; -/// Result of PVF preparation performed by the validation host. Contains stats about the preparation if -/// successful +/// Result of PVF preparation performed by the validation host. Contains stats about the preparation +/// if successful pub type PrepareResult = Result; /// An error that occurred during the prepare part of the PVF pipeline. @@ -35,13 +35,15 @@ pub enum PrepareError { Panic(String), /// Failed to prepare the PVF due to the time limit. TimedOut, - /// An IO error occurred. This state is reported by either the validation host or by the worker. + /// An IO error occurred. This state is reported by either the validation host or by the + /// worker. IoErr(String), - /// The temporary file for the artifact could not be created at the given cache path. 
This state is reported by the - /// validation host (not by the worker). + /// The temporary file for the artifact could not be created at the given cache path. This + /// state is reported by the validation host (not by the worker). CreateTmpFileErr(String), - /// The response from the worker is received, but the file cannot be renamed (moved) to the final destination - /// location. This state is reported by the validation host (not by the worker). + /// The response from the worker is received, but the file cannot be renamed (moved) to the + /// final destination location. This state is reported by the validation host (not by the + /// worker). RenameTmpFileErr(String), } @@ -81,15 +83,16 @@ impl fmt::Display for PrepareError { /// Some internal error occurred. /// -/// Should only ever be used for validation errors independent of the candidate and PVF, or for errors we ruled out -/// during pre-checking (so preparation errors are fine). +/// Should only ever be used for validation errors independent of the candidate and PVF, or for +/// errors we ruled out during pre-checking (so preparation errors are fine). #[derive(Debug, Clone, Encode, Decode)] pub enum InternalValidationError { /// Some communication error occurred with the host. HostCommunication(String), /// Could not find or open compiled artifact file. CouldNotOpenFile(String), - /// An error occurred in the CPU time monitor thread. Should be totally unrelated to validation. + /// An error occurred in the CPU time monitor thread. Should be totally unrelated to + /// validation. CpuTimeMonitorThread(String), /// Some non-deterministic preparation error occurred. 
NonDeterministicPrepareError(PrepareError), diff --git a/node/core/pvf/common/src/executor_intf.rs b/node/core/pvf/common/src/executor_intf.rs index ef74e5f2ca92..42ed4b79c761 100644 --- a/node/core/pvf/common/src/executor_intf.rs +++ b/node/core/pvf/common/src/executor_intf.rs @@ -35,10 +35,10 @@ use std::any::{Any, TypeId}; // left for the stack; this is, of course, overridable at link time when compiling the runtime) // plus the number of pages specified in the `extra_heap_pages` passed to the executor. // -// By default, rustc (or `lld` specifically) should allocate 1 MiB for the shadow stack, or 16 pages. -// The data section for runtimes are typically rather small and can fit in a single digit number of -// WASM pages, so let's say an extra 16 pages. Thus let's assume that 32 pages or 2 MiB are used for -// these needs by default. +// By default, rustc (or `lld` specifically) should allocate 1 MiB for the shadow stack, or 16 +// pages. The data section for runtimes are typically rather small and can fit in a single digit +// number of WASM pages, so let's say an extra 16 pages. Thus let's assume that 32 pages or 2 MiB +// are used for these needs by default. const DEFAULT_HEAP_PAGES_ESTIMATE: u32 = 32; const EXTRA_HEAP_PAGES: u32 = 2048; @@ -65,9 +65,9 @@ pub const DEFAULT_CONFIG: Config = Config { // // Here is how the values below were chosen. // - // At the moment of writing, the default native stack size limit is 1 MiB. Assuming a logical item - // (see the docs about the field and the instrumentation algorithm) is 8 bytes, 1 MiB can - // fit 2x 65536 logical items. + // At the moment of writing, the default native stack size limit is 1 MiB. Assuming a + // logical item (see the docs about the field and the instrumentation algorithm) is 8 bytes, + // 1 MiB can fit 2x 65536 logical items. // // Since reaching the native stack limit is undesirable, we halve the logical item limit and // also increase the native 256x. 
This hopefully should preclude wasm code from reaching @@ -113,7 +113,7 @@ pub fn params_to_wasmtime_semantics(par: &ExecutorParams) -> Result sem.wasm_bulk_memory = true, // TODO: Not implemented yet; . ExecutorParam::PrecheckingMaxMemory(_) => (), - ExecutorParam::PvfPrepTimeout(_, _) | ExecutorParam::PvfExecTimeout(_, _) => (), // Not used here + ExecutorParam::PvfPrepTimeout(_, _) | ExecutorParam::PvfExecTimeout(_, _) => (), /* Not used here */ } } sem.deterministic_stack_limit = Some(stack_limit); @@ -135,8 +135,8 @@ impl Executor { Ok(Self { config }) } - /// Executes the given PVF in the form of a compiled artifact and returns the result of execution - /// upon success. + /// Executes the given PVF in the form of a compiled artifact and returns the result of + /// execution upon success. /// /// # Safety /// diff --git a/node/core/pvf/common/src/worker/mod.rs b/node/core/pvf/common/src/worker/mod.rs index 8dd99fc762d8..d9a0dff71b24 100644 --- a/node/core/pvf/common/src/worker/mod.rs +++ b/node/core/pvf/common/src/worker/mod.rs @@ -251,9 +251,9 @@ pub mod thread { Arc::new((Mutex::new(WaitOutcome::Pending), Condvar::new())) } - /// Runs a worker thread. Will first enable security features, and afterwards notify the threads waiting on the - /// condvar. Catches panics during execution and resumes the panics after triggering the condvar, so that the - /// waiting thread is notified on panics. + /// Runs a worker thread. Will first enable security features, and afterwards notify the threads + /// waiting on the condvar. Catches panics during execution and resumes the panics after + /// triggering the condvar, so that the waiting thread is notified on panics. 
/// /// # Returns /// diff --git a/node/core/pvf/execute-worker/src/lib.rs b/node/core/pvf/execute-worker/src/lib.rs index c6ee515f9093..6f632a0ae95e 100644 --- a/node/core/pvf/execute-worker/src/lib.rs +++ b/node/core/pvf/execute-worker/src/lib.rs @@ -239,7 +239,8 @@ pub fn worker_entrypoint( WaitOutcome::TimedOut => { match cpu_time_monitor_thread.join() { Ok(Some(cpu_time_elapsed)) => { - // Log if we exceed the timeout and the other thread hasn't finished. + // Log if we exceed the timeout and the other thread hasn't + // finished. gum::warn!( target: LOG_TARGET, %worker_pid, diff --git a/node/core/pvf/prepare-worker/src/lib.rs b/node/core/pvf/prepare-worker/src/lib.rs index c9d258625df9..ac116cf78631 100644 --- a/node/core/pvf/prepare-worker/src/lib.rs +++ b/node/core/pvf/prepare-worker/src/lib.rs @@ -206,8 +206,9 @@ pub fn worker_entrypoint( // If we are pre-checking, check for runtime construction errors. // - // As pre-checking is more strict than just preparation in terms of memory and - // time, it is okay to do extra checks here. This takes negligible time anyway. + // As pre-checking is more strict than just preparation in terms of memory + // and time, it is okay to do extra checks here. This takes negligible time + // anyway. if let PrepareJobKind::Prechecking = prepare_job_kind { result = result.and_then(|output| { runtime_construction_check(output.0.as_ref(), executor_params)?; @@ -269,10 +270,11 @@ pub fn worker_entrypoint( // Write the serialized artifact into a temp file. // - // PVF host only keeps artifacts statuses in its memory, successfully - // compiled code gets stored on the disk (and consequently deserialized - // by execute-workers). The prepare worker is only required to send `Ok` - // to the pool to indicate the success. + // PVF host only keeps artifacts statuses in its memory, + // successfully compiled code gets stored on the disk (and + // consequently deserialized by execute-workers). 
The prepare worker + // is only required to send `Ok` to the pool to indicate the + // success. gum::debug!( target: LOG_TARGET, @@ -291,7 +293,8 @@ pub fn worker_entrypoint( WaitOutcome::TimedOut => { match cpu_time_monitor_thread.join() { Ok(Some(cpu_time_elapsed)) => { - // Log if we exceed the timeout and the other thread hasn't finished. + // Log if we exceed the timeout and the other thread hasn't + // finished. gum::warn!( target: LOG_TARGET, %worker_pid, diff --git a/node/core/pvf/prepare-worker/src/memory_stats.rs b/node/core/pvf/prepare-worker/src/memory_stats.rs index e6dc8572c4a3..7904dfa9cb88 100644 --- a/node/core/pvf/prepare-worker/src/memory_stats.rs +++ b/node/core/pvf/prepare-worker/src/memory_stats.rs @@ -83,8 +83,8 @@ pub mod memory_tracker { /// /// # Errors /// - /// For simplicity, any errors are returned as a string. As this is not a critical component, errors - /// are used for informational purposes (logging) only. + /// For simplicity, any errors are returned as a string. As this is not a critical component, + /// errors are used for informational purposes (logging) only. pub fn memory_tracker_loop(condvar: thread::Cond) -> Result { // NOTE: This doesn't need to be too fine-grained since preparation currently takes 3-10s or // more. Apart from that, there is not really a science to this number. diff --git a/node/core/pvf/src/artifacts.rs b/node/core/pvf/src/artifacts.rs index 78d2f88941b8..a180af15db27 100644 --- a/node/core/pvf/src/artifacts.rs +++ b/node/core/pvf/src/artifacts.rs @@ -224,7 +224,8 @@ impl Artifacts { .is_none()); } - /// Remove and retrieve the artifacts from the table that are older than the supplied Time-To-Live. + /// Remove and retrieve the artifacts from the table that are older than the supplied + /// Time-To-Live. 
pub fn prune(&mut self, artifact_ttl: Duration) -> Vec { let now = SystemTime::now(); diff --git a/node/core/pvf/src/error.rs b/node/core/pvf/src/error.rs index 7372cd233c49..cb35ec9e9d9a 100644 --- a/node/core/pvf/src/error.rs +++ b/node/core/pvf/src/error.rs @@ -38,29 +38,30 @@ pub enum InvalidCandidate { /// The worker has died during validation of a candidate. That may fall in one of the following /// categories, which we cannot distinguish programmatically: /// - /// (a) Some sort of transient glitch caused the worker process to abort. An example would be that - /// the host machine ran out of free memory and the OOM killer started killing the processes, - /// and in order to save the parent it will "sacrifice child" first. + /// (a) Some sort of transient glitch caused the worker process to abort. An example would be + /// that the host machine ran out of free memory and the OOM killer started killing the + /// processes, and in order to save the parent it will "sacrifice child" first. /// /// (b) The candidate triggered a code path that has lead to the process death. For example, - /// the PVF found a way to consume unbounded amount of resources and then it either exceeded - /// an `rlimit` (if set) or, again, invited OOM killer. Another possibility is a bug in - /// wasmtime allowed the PVF to gain control over the execution worker. + /// the PVF found a way to consume unbounded amount of resources and then it either + /// exceeded an `rlimit` (if set) or, again, invited OOM killer. Another possibility is a + /// bug in wasmtime allowed the PVF to gain control over the execution worker. /// /// We attribute such an event to an *invalid candidate* in either case. /// /// The rationale for this is that a glitch may lead to unfair rejecting candidate by a single - /// validator. 
If the glitch is somewhat more persistent the validator will reject all candidate - /// thrown at it and hopefully the operator notices it by decreased reward performance of the - /// validator. On the other hand, if the worker died because of (b) we would have better chances - /// to stop the attack. + /// validator. If the glitch is somewhat more persistent the validator will reject all + /// candidate thrown at it and hopefully the operator notices it by decreased reward + /// performance of the validator. On the other hand, if the worker died because of (b) we would + /// have better chances to stop the attack. AmbiguousWorkerDeath, /// PVF execution (compilation is not included) took more time than was allotted. HardTimeout, - /// A panic occurred and we can't be sure whether the candidate is really invalid or some internal glitch occurred. - /// Whenever we are unsure, we can never treat an error as internal as we would abstain from voting. This is bad - /// because if the issue was due to the candidate, then all validators would abstain, stalling finality on the - /// chain. So we will first retry the candidate, and if the issue persists we are forced to vote invalid. + /// A panic occurred and we can't be sure whether the candidate is really invalid or some + /// internal glitch occurred. Whenever we are unsure, we can never treat an error as internal + /// as we would abstain from voting. This is bad because if the issue was due to the candidate, + /// then all validators would abstain, stalling finality on the chain. So we will first retry + /// the candidate, and if the issue persists we are forced to vote invalid. Panic(String), } diff --git a/node/core/pvf/src/execute/queue.rs b/node/core/pvf/src/execute/queue.rs index 33a1c6f89709..acb260e25693 100644 --- a/node/core/pvf/src/execute/queue.rs +++ b/node/core/pvf/src/execute/queue.rs @@ -419,7 +419,8 @@ fn spawn_extra_worker(queue: &mut Queue, job: ExecuteJob) { /// beforehand. 
In such a way, a race condition is avoided: during the worker being spawned, /// another job in the queue, with an incompatible execution environment, may become stale, and /// the queue would have to kill a newly started worker and spawn another one. -/// Nevertheless, if the worker finishes executing the job, it becomes idle and may be used to execute other jobs with a compatible execution environment. +/// Nevertheless, if the worker finishes executing the job, it becomes idle and may be used to +/// execute other jobs with a compatible execution environment. async fn spawn_worker_task( program_path: PathBuf, job: ExecuteJob, diff --git a/node/core/pvf/src/execute/worker_intf.rs b/node/core/pvf/src/execute/worker_intf.rs index 9d8b61d10447..948abd2261d7 100644 --- a/node/core/pvf/src/execute/worker_intf.rs +++ b/node/core/pvf/src/execute/worker_intf.rs @@ -74,8 +74,9 @@ pub enum Outcome { /// PVF execution completed successfully and the result is returned. The worker is ready for /// another job. Ok { result_descriptor: ValidationResult, duration: Duration, idle_worker: IdleWorker }, - /// The candidate validation failed. It may be for example because the wasm execution triggered a trap. - /// Errors related to the preparation process are not expected to be encountered by the execution workers. + /// The candidate validation failed. It may be for example because the wasm execution triggered + /// a trap. Errors related to the preparation process are not expected to be encountered by the + /// execution workers. InvalidCandidate { err: String, idle_worker: IdleWorker }, /// An internal error happened during the validation. Such an error is most likely related to /// some transient glitch. @@ -95,7 +96,8 @@ pub enum Outcome { /// Given the idle token of a worker and parameters of work, communicates with the worker and /// returns the outcome. /// -/// NOTE: Not returning the idle worker token in `Outcome` will trigger the child process being killed. 
+/// NOTE: Not returning the idle worker token in `Outcome` will trigger the child process being +/// killed. pub async fn start_work( worker: IdleWorker, artifact: ArtifactPathId, diff --git a/node/core/pvf/src/host.rs b/node/core/pvf/src/host.rs index a5772e34e16e..9f3b7e23fd89 100644 --- a/node/core/pvf/src/host.rs +++ b/node/core/pvf/src/host.rs @@ -455,8 +455,8 @@ async fn handle_precheck_pvf( ArtifactState::Preparing { waiting_for_response, num_failures: _ } => waiting_for_response.push(result_sender), ArtifactState::FailedToProcess { error, .. } => { - // Do not retry failed preparation if another pre-check request comes in. We do not retry pre-checking, - // anyway. + // Do not retry failed preparation if another pre-check request comes in. We do not + // retry pre-checking, anyway. let _ = result_sender.send(PrepareResult::Err(error.clone())); }, } @@ -470,8 +470,8 @@ async fn handle_precheck_pvf( /// Handles PVF execution. /// -/// This will try to prepare the PVF, if a prepared artifact does not already exist. If there is already a -/// preparation job, we coalesce the two preparation jobs. +/// This will try to prepare the PVF, if a prepared artifact does not already exist. If there is +/// already a preparation job, we coalesce the two preparation jobs. /// /// If the prepare job succeeded previously, we will enqueue an execute job right away. /// @@ -521,7 +521,8 @@ async fn handle_execute_pvf( "handle_execute_pvf: Re-queuing PVF preparation for prepared artifact with missing file." ); - // The artifact has been prepared previously but the file is missing, prepare it again. + // The artifact has been prepared previously but the file is missing, prepare it + // again. 
*state = ArtifactState::Preparing { waiting_for_response: Vec::new(), num_failures: 0, @@ -721,8 +722,8 @@ async fn handle_prepare_done( pending_requests { if result_tx.is_canceled() { - // Preparation could've taken quite a bit of time and the requester may be not interested - // in execution anymore, in which case we just skip the request. + // Preparation could've taken quite a bit of time and the requester may be not + // interested in execution anymore, in which case we just skip the request. continue } @@ -855,8 +856,8 @@ fn can_retry_prepare_after_failure( return false } - // Retry if the retry cooldown has elapsed and if we have already retried less than `NUM_PREPARE_RETRIES` times. IO - // errors may resolve themselves. + // Retry if the retry cooldown has elapsed and if we have already retried less than + // `NUM_PREPARE_RETRIES` times. IO errors may resolve themselves. SystemTime::now() >= last_time_failed + PREPARE_FAILURE_COOLDOWN && num_failures <= NUM_PREPARE_RETRIES } diff --git a/node/core/pvf/src/lib.rs b/node/core/pvf/src/lib.rs index eb6ab39ac500..1da0593835fb 100644 --- a/node/core/pvf/src/lib.rs +++ b/node/core/pvf/src/lib.rs @@ -32,26 +32,26 @@ //! (a) PVF pre-checking. This takes the `Pvf` code and tries to prepare it (verify and //! compile) in order to pre-check its validity. //! -//! (b) PVF execution. This accepts the PVF [`params`][`polkadot_parachain::primitives::ValidationParams`] -//! and the `Pvf` code, prepares (verifies and compiles) the code, and then executes PVF -//! with the `params`. +//! (b) PVF execution. This accepts the PVF +//! [`params`][`polkadot_parachain::primitives::ValidationParams`] and the `Pvf` code, prepares +//! (verifies and compiles) the code, and then executes PVF with the `params`. //! //! (c) Heads up. This request allows to signal that the given PVF may be needed soon and that it //! should be prepared for execution. //! -//! 
The preparation results are cached for some time after they either used or was signaled in heads up. -//! All requests that depends on preparation of the same PVF are bundled together and will be executed -//! as soon as the artifact is prepared. +//! The preparation results are cached for some time after they either used or was signaled in heads +//! up. All requests that depends on preparation of the same PVF are bundled together and will be +//! executed as soon as the artifact is prepared. //! //! # Priority //! -//! PVF execution requests can specify the [priority][`Priority`] with which the given request should -//! be handled. Different priority levels have different effects. This is discussed below. +//! PVF execution requests can specify the [priority][`Priority`] with which the given request +//! should be handled. Different priority levels have different effects. This is discussed below. //! //! Preparation started by a heads up signal always starts with the background priority. If there -//! is already a request for that PVF preparation under way the priority is inherited. If after heads -//! up, a new PVF execution request comes in with a higher priority, then the original task's priority -//! will be adjusted to match the new one if it's larger. +//! is already a request for that PVF preparation under way the priority is inherited. If after +//! heads up, a new PVF execution request comes in with a higher priority, then the original task's +//! priority will be adjusted to match the new one if it's larger. //! //! Priority can never go down, only up. //! @@ -63,11 +63,11 @@ //! dissimilar to actors. Each of such "processes" is a future task that contains an event loop that //! processes incoming messages, potentially delegating sub-tasks to other "processes". //! -//! Two of these processes are queues. The first one is for preparation jobs and the second one is for -//! execution. 
Both of the queues are backed by separate pools of workers of different kind. +//! Two of these processes are queues. The first one is for preparation jobs and the second one is +//! for execution. Both of the queues are backed by separate pools of workers of different kind. //! -//! Preparation workers handle preparation requests by prevalidating and instrumenting PVF wasm code, -//! and then passing it into the compiler, to prepare the artifact. +//! Preparation workers handle preparation requests by prevalidating and instrumenting PVF wasm +//! code, and then passing it into the compiler, to prepare the artifact. //! //! ## Artifacts //! diff --git a/node/core/pvf/src/metrics.rs b/node/core/pvf/src/metrics.rs index 62f8c6dc5157..3d792793498b 100644 --- a/node/core/pvf/src/metrics.rs +++ b/node/core/pvf/src/metrics.rs @@ -85,7 +85,8 @@ impl Metrics { #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] if let Some(tracker_stats) = memory_stats.memory_tracker_stats { - // We convert these stats from B to KB to match the unit of `ru_maxrss` from `getrusage`. + // We convert these stats from B to KB to match the unit of `ru_maxrss` from + // `getrusage`. let max_resident_kb = (tracker_stats.resident / 1024) as f64; let max_allocated_kb = (tracker_stats.allocated / 1024) as f64; diff --git a/node/core/pvf/src/prepare/pool.rs b/node/core/pvf/src/prepare/pool.rs index 1e8ccc7365bf..92aa4896c263 100644 --- a/node/core/pvf/src/prepare/pool.rs +++ b/node/core/pvf/src/prepare/pool.rs @@ -61,9 +61,9 @@ pub enum ToPool { /// Request the given worker to start working on the given code. /// - /// Once the job either succeeded or failed, a [`FromPool::Concluded`] message will be sent back. - /// It's also possible that the worker dies before handling the message in which case [`FromPool::Rip`] - /// will be sent back. + /// Once the job either succeeded or failed, a [`FromPool::Concluded`] message will be sent + /// back. 
It's also possible that the worker dies before handling the message in which case + /// [`FromPool::Rip`] will be sent back. /// /// In either case, the worker is considered busy and no further `StartWork` messages should be /// sent until either `Concluded` or `Rip` message is received. @@ -237,8 +237,8 @@ fn handle_to_pool( ); } else { // idle token is present after spawn and after a job is concluded; - // the precondition for `StartWork` is it should be sent only if all previous work - // items concluded; + // the precondition for `StartWork` is it should be sent only if all previous + // work items concluded; // thus idle token is Some; // qed. never!("unexpected absence of the idle token in prepare pool"); @@ -311,7 +311,8 @@ fn handle_mux( match outcome { Outcome::Concluded { worker: idle, result } => handle_concluded_no_rip(from_pool, spawned, worker, idle, result), - // Return `Concluded`, but do not kill the worker since the error was on the host side. + // Return `Concluded`, but do not kill the worker since the error was on the host + // side. Outcome::CreateTmpFileErr { worker: idle, err } => handle_concluded_no_rip( from_pool, spawned, @@ -319,7 +320,8 @@ fn handle_mux( idle, Err(PrepareError::CreateTmpFileErr(err)), ), - // Return `Concluded`, but do not kill the worker since the error was on the host side. + // Return `Concluded`, but do not kill the worker since the error was on the host + // side. Outcome::RenameTmpFileErr { worker: idle, result: _, err } => handle_concluded_no_rip( from_pool, diff --git a/node/core/pvf/src/prepare/queue.rs b/node/core/pvf/src/prepare/queue.rs index 5e19a4c7217a..c38012d74548 100644 --- a/node/core/pvf/src/prepare/queue.rs +++ b/node/core/pvf/src/prepare/queue.rs @@ -96,8 +96,9 @@ impl WorkerData { } } -/// A queue structured like this is prone to starving, however, we don't care that much since we expect -/// there is going to be a limited number of critical jobs and we don't really care if background starve. 
+/// A queue structured like this is prone to starving, however, we don't care that much since we +/// expect there is going to be a limited number of critical jobs and we don't really care if +/// background starve. #[derive(Default)] struct Unscheduled { normal: VecDeque, diff --git a/node/core/pvf/src/prepare/worker_intf.rs b/node/core/pvf/src/prepare/worker_intf.rs index d0d9a026dda7..5280ab6b42a2 100644 --- a/node/core/pvf/src/prepare/worker_intf.rs +++ b/node/core/pvf/src/prepare/worker_intf.rs @@ -247,8 +247,8 @@ where let outcome = f(tmp_file.clone(), stream).await; - // The function called above is expected to move `tmp_file` to a new location upon success. However, - // the function may as well fail and in that case we should remove the tmp file here. + // The function called above is expected to move `tmp_file` to a new location upon success. + // However, the function may as well fail and in that case we should remove the tmp file here. // // In any case, we try to remove the file here so that there are no leftovers. We only report // errors that are different from the `NotFound`. diff --git a/node/core/pvf/src/worker_intf.rs b/node/core/pvf/src/worker_intf.rs index ef5733ec0e6d..795ad4524443 100644 --- a/node/core/pvf/src/worker_intf.rs +++ b/node/core/pvf/src/worker_intf.rs @@ -196,13 +196,15 @@ pub enum SpawnErr { Handshake, } -/// This is a representation of a potentially running worker. Drop it and the process will be killed. +/// This is a representation of a potentially running worker. Drop it and the process will be +/// killed. /// /// A worker's handle is also a future that resolves when it's detected that the worker's process /// has been terminated. Since the worker is running in another process it is obviously not /// necessary to poll this future to make the worker run, it's only for termination detection. /// -/// This future relies on the fact that a child process's stdout `fd` is closed upon it's termination. 
+/// This future relies on the fact that a child process's stdout `fd` is closed upon it's +/// termination. #[pin_project] pub struct WorkerHandle { child: process::Child, @@ -240,15 +242,15 @@ impl WorkerHandle { child_id, stdout, program: program.as_ref().to_path_buf(), - // We don't expect the bytes to be ever read. But in case we do, we should not use a buffer - // of a small size, because otherwise if the child process does return any data we will end up - // issuing a syscall for each byte. We also prefer not to do allocate that on the stack, since - // each poll the buffer will be allocated and initialized (and that's due `poll_read` takes &mut [u8] - // and there are no guarantees that a `poll_read` won't ever read from there even though that's - // unlikely). + // We don't expect the bytes to be ever read. But in case we do, we should not use a + // buffer of a small size, because otherwise if the child process does return any data + // we will end up issuing a syscall for each byte. We also prefer not to do allocate + // that on the stack, since each poll the buffer will be allocated and initialized (and + // that's due `poll_read` takes &mut [u8] and there are no guarantees that a `poll_read` + // won't ever read from there even though that's unlikely). // - // OTOH, we also don't want to be super smart here and we could just afford to allocate a buffer - // for that here. + // OTOH, we also don't want to be super smart here and we could just afford to allocate + // a buffer for that here. drop_box: vec![0; 8192].into_boxed_slice(), }) } @@ -280,8 +282,8 @@ impl futures::Future for WorkerHandle { } }, Err(err) => { - // The implementation is guaranteed to not to return `WouldBlock` and Interrupted. This - // leaves us with legit errors which we suppose were due to termination. + // The implementation is guaranteed to not to return `WouldBlock` and Interrupted. + // This leaves us with legit errors which we suppose were due to termination. 
// Log the status code. gum::debug!( diff --git a/node/core/runtime-api/src/lib.rs b/node/core/runtime-api/src/lib.rs index 252bb21b0edb..0ee5ca24ceee 100644 --- a/node/core/runtime-api/src/lib.rs +++ b/node/core/runtime-api/src/lib.rs @@ -321,7 +321,8 @@ where return futures::pending!() } - // If there are active requests, this will always resolve to `Some(_)` when a request is finished. + // If there are active requests, this will always resolve to `Some(_)` when a request is + // finished. if let Some(Ok(Some(result))) = self.active_requests.next().await { self.store_cache(result); } @@ -343,10 +344,10 @@ where { loop { // Let's add some back pressure when the subsystem is running at `MAX_PARALLEL_REQUESTS`. - // This can never block forever, because `active_requests` is owned by this task and any mutations - // happen either in `poll_requests` or `spawn_request` - so if `is_busy` returns true, then - // even if all of the requests finish before us calling `poll_requests` the `active_requests` length - // remains invariant. + // This can never block forever, because `active_requests` is owned by this task and any + // mutations happen either in `poll_requests` or `spawn_request` - so if `is_busy` returns + // true, then even if all of the requests finish before us calling `poll_requests` the + // `active_requests` length remains invariant. if subsystem.is_busy() { // Since we are not using any internal waiting queues, we need to wait for exactly // one request to complete before we can read the next one from the overseer channel. diff --git a/node/core/runtime-api/src/tests.rs b/node/core/runtime-api/src/tests.rs index 27090a102ec2..33f5eef3869f 100644 --- a/node/core/runtime-api/src/tests.rs +++ b/node/core/runtime-api/src/tests.rs @@ -895,7 +895,8 @@ fn multiple_requests_in_parallel_are_working() { receivers.push(rx); } - // The backpressure from reaching `MAX_PARALLEL_REQUESTS` will make the test block, we need to drop the lock. 
+ // The backpressure from reaching `MAX_PARALLEL_REQUESTS` will make the test block, we need + // to drop the lock. drop(lock); for _ in 0..MAX_PARALLEL_REQUESTS * 100 { diff --git a/node/gum/src/lib.rs b/node/gum/src/lib.rs index e989a15ae4e3..1cc4d8dec1cb 100644 --- a/node/gum/src/lib.rs +++ b/node/gum/src/lib.rs @@ -67,14 +67,13 @@ //! //! Here's the rundown on how fields work: //! -//! - Fields on spans and events are specified using the `syntax field_name = -//! field_value`. -//! - Local variables may be used as field values without an assignment, similar to -//! struct initializers. -//! - The `?` sigil is shorthand that specifies a field should be recorded using its -//! `fmt::Debug` implementation. -//! - The `%` sigil operates similarly, but indicates that the value should be -//! recorded using its `fmt::Display` implementation. +//! - Fields on spans and events are specified using the `syntax field_name = field_value`. +//! - Local variables may be used as field values without an assignment, similar to struct +//! initializers. +//! - The `?` sigil is shorthand that specifies a field should be recorded using its `fmt::Debug` +//! implementation. +//! - The `%` sigil operates similarly, but indicates that the value should be recorded using its +//! `fmt::Display` implementation. //! //! For full details, again see [the tracing //! docs](https://docs.rs/tracing/latest/tracing/index.html#recording-fields). diff --git a/node/jaeger/src/lib.rs b/node/jaeger/src/lib.rs index 99222589d4ab..7de458606816 100644 --- a/node/jaeger/src/lib.rs +++ b/node/jaeger/src/lib.rs @@ -132,7 +132,8 @@ impl Jaeger { match tokio::net::UdpSocket::bind("0.0.0.0:0").await { Ok(udp_socket) => loop { let buf = traces_out.next().await; - // UDP sending errors happen only either if the API is misused or in case of missing privilege. + // UDP sending errors happen only either if the API is misused or in case of + // missing privilege. 
if let Err(e) = udp_socket.send_to(&buf, jaeger_agent).await { log::debug!(target: "jaeger", "UDP send error: {}", e); } diff --git a/node/jaeger/src/spans.rs b/node/jaeger/src/spans.rs index be8bf9cd5ddc..4038d41344f2 100644 --- a/node/jaeger/src/spans.rs +++ b/node/jaeger/src/spans.rs @@ -110,8 +110,8 @@ impl PerLeafSpan { /// Creates a new instance. /// /// Takes the `leaf_span` that is created by the overseer per leaf and a name for a child span. - /// Both will be stored in this object, while the child span is implicitly accessible by using the - /// [`Deref`](std::ops::Deref) implementation. + /// Both will be stored in this object, while the child span is implicitly accessible by using + /// the [`Deref`](std::ops::Deref) implementation. pub fn new(leaf_span: Arc, name: &'static str) -> Self { let span = leaf_span.child(name); diff --git a/node/malus/src/variants/common.rs b/node/malus/src/variants/common.rs index 4ea8b88b56a5..ab1dfbbb360a 100644 --- a/node/malus/src/variants/common.rs +++ b/node/malus/src/variants/common.rs @@ -125,8 +125,8 @@ where Self { fake_validation, fake_validation_error, distribution, spawner } } - /// Creates and sends the validation response for a given candidate. Queries the runtime to obtain the validation data for the - /// given candidate. + /// Creates and sends the validation response for a given candidate. Queries the runtime to + /// obtain the validation data for the given candidate. pub fn send_validation_response( &self, candidate_descriptor: CandidateDescriptor, @@ -203,7 +203,8 @@ where { type Message = CandidateValidationMessage; - // Capture all (approval and backing) candidate validation requests and depending on configuration fail them. + // Capture all (approval and backing) candidate validation requests and depending on + // configuration fail them. 
fn intercept_incoming( &self, subsystem_sender: &mut Sender, @@ -279,7 +280,8 @@ where }, FakeCandidateValidation::ApprovalInvalid | FakeCandidateValidation::BackingAndApprovalInvalid => { - // Set the validation result to invalid with probability `p` and trigger a dispute + // Set the validation result to invalid with probability `p` and trigger a + // dispute let behave_maliciously = self.distribution.sample(&mut rand::thread_rng()); match behave_maliciously { true => { @@ -294,7 +296,8 @@ where &validation_result, ); - // We're not even checking the candidate, this makes us appear faster than honest validators. + // We're not even checking the candidate, this makes us appear + // faster than honest validators. sender.send(Ok(validation_result)).unwrap(); None }, @@ -370,7 +373,8 @@ where ); None }, - // If the `PoV` is malicious, we behave normally with some probability `(1-p)` + // If the `PoV` is malicious, we behave normally with some probability + // `(1-p)` false => Some(FromOrchestra::Communication { msg: CandidateValidationMessage::ValidateFromChainState( candidate_receipt, @@ -383,7 +387,8 @@ where }, FakeCandidateValidation::BackingInvalid | FakeCandidateValidation::BackingAndApprovalInvalid => { - // Maliciously set the validation result to invalid for a valid candidate with probability `p` + // Maliciously set the validation result to invalid for a valid candidate + // with probability `p` let behave_maliciously = self.distribution.sample(&mut rand::thread_rng()); match behave_maliciously { true => { @@ -396,7 +401,8 @@ where "😈 Maliciously sending invalid validation result: {:?}.", &validation_result, ); - // We're not even checking the candidate, this makes us appear faster than honest validators. + // We're not even checking the candidate, this makes us appear + // faster than honest validators. 
response_sender.send(Ok(validation_result)).unwrap(); None }, diff --git a/node/malus/src/variants/dispute_valid_candidates.rs b/node/malus/src/variants/dispute_valid_candidates.rs index ab1fba478beb..9ea8449a1d0b 100644 --- a/node/malus/src/variants/dispute_valid_candidates.rs +++ b/node/malus/src/variants/dispute_valid_candidates.rs @@ -45,14 +45,15 @@ use std::sync::Arc; #[command(rename_all = "kebab-case")] #[allow(missing_docs)] pub struct DisputeAncestorOptions { - /// Malicious candidate validation subsystem configuration. When enabled, node PVF execution is skipped - /// during backing and/or approval and it's result can by specified by this option and `--fake-validation-error` - /// for invalid candidate outcomes. + /// Malicious candidate validation subsystem configuration. When enabled, node PVF execution is + /// skipped during backing and/or approval and it's result can by specified by this option and + /// `--fake-validation-error` for invalid candidate outcomes. #[arg(long, value_enum, ignore_case = true, default_value_t = FakeCandidateValidation::BackingAndApprovalInvalid)] pub fake_validation: FakeCandidateValidation, - /// Applies only when `--fake-validation` is configured to reject candidates as invalid. It allows - /// to specify the exact error to return from the malicious candidate validation subsystem. + /// Applies only when `--fake-validation` is configured to reject candidates as invalid. It + /// allows to specify the exact error to return from the malicious candidate validation + /// subsystem. 
#[arg(long, value_enum, ignore_case = true, default_value_t = FakeCandidateValidationError::InvalidOutputs)] pub fake_validation_error: FakeCandidateValidationError, diff --git a/node/malus/src/variants/suggest_garbage_candidate.rs b/node/malus/src/variants/suggest_garbage_candidate.rs index 9fd8f6473bde..7d301c194b44 100644 --- a/node/malus/src/variants/suggest_garbage_candidate.rs +++ b/node/malus/src/variants/suggest_garbage_candidate.rs @@ -88,14 +88,15 @@ where "Received request to second candidate", ); - // Need to draw value from Bernoulli distribution with given probability of success defined by the clap parameter. - // Note that clap parameter must be f64 since this is expected by the Bernoulli::new() function. - // It must be converted from u8, due to the lack of support for the .range() call on u64 in the clap crate. + // Need to draw value from Bernoulli distribution with given probability of success + // defined by the clap parameter. Note that clap parameter must be f64 since this is + // expected by the Bernoulli::new() function. It must be converted from u8, due to + // the lack of support for the .range() call on u64 in the clap crate. let distribution = Bernoulli::new(self.percentage / 100.0) .expect("Invalid probability! Percentage must be in range [0..=100]."); - // Draw a random boolean from the Bernoulli distribution with probability of true equal to `p`. - // We use `rand::thread_rng` as the source of randomness. + // Draw a random boolean from the Bernoulli distribution with probability of true + // equal to `p`. We use `rand::thread_rng` as the source of randomness. let generate_malicious_candidate = distribution.sample(&mut rand::thread_rng()); if generate_malicious_candidate == true { diff --git a/node/metrics/src/lib.rs b/node/metrics/src/lib.rs index 69b3771d696a..9cb0f289a580 100644 --- a/node/metrics/src/lib.rs +++ b/node/metrics/src/lib.rs @@ -19,7 +19,8 @@ //! Collects a bunch of metrics providers and related features such as //! 
`Metronome` for usage with metrics collections. //! -//! This crate also reexports Prometheus metric types which are expected to be implemented by subsystems. +//! This crate also reexports Prometheus metric types which are expected to be implemented by +//! subsystems. #![deny(missing_docs)] #![deny(unused_imports)] diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index bc85f54177cb..803a56251495 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -102,11 +102,13 @@ impl RecentlyOutdated { // Aggression has 3 levels: // // * Aggression Level 0: The basic behaviors described above. -// * Aggression Level 1: The originator of a message sends to all peers. Other peers follow the rules above. -// * Aggression Level 2: All peers send all messages to all their row and column neighbors. -// This means that each validator will, on average, receive each message approximately `2*sqrt(n)` times. -// The aggression level of messages pertaining to a block increases when that block is unfinalized and -// is a child of the finalized block. +// * Aggression Level 1: The originator of a message sends to all peers. Other peers follow the +// rules above. +// * Aggression Level 2: All peers send all messages to all their row and column neighbors. This +// means that each validator will, on average, receive each message approximately `2*sqrt(n)` +// times. +// The aggression level of messages pertaining to a block increases when that block is unfinalized +// and is a child of the finalized block. // This means that only one block at a time has its messages propagated with aggression > 0. // // A note on aggression thresholds: changes in propagation apply only to blocks which are the @@ -120,7 +122,8 @@ impl RecentlyOutdated { struct AggressionConfig { /// Aggression level 1: all validators send all their own messages to all peers. 
l1_threshold: Option, - /// Aggression level 2: level 1 + all validators send all messages to all peers in the X and Y dimensions. + /// Aggression level 2: level 1 + all validators send all messages to all peers in the X and Y + /// dimensions. l2_threshold: Option, /// How often to re-send messages to all targeted recipients. /// This applies to all unfinalized blocks. @@ -167,11 +170,12 @@ struct State { blocks: HashMap, /// Our view updates to our peers can race with `NewBlocks` updates. We store messages received - /// against the directly mentioned blocks in our view in this map until `NewBlocks` is received. + /// against the directly mentioned blocks in our view in this map until `NewBlocks` is + /// received. /// - /// As long as the parent is already in the `blocks` map and `NewBlocks` messages aren't delayed - /// by more than a block length, this strategy will work well for mitigating the race. This is - /// also a race that occurs typically on local networks. + /// As long as the parent is already in the `blocks` map and `NewBlocks` messages aren't + /// delayed by more than a block length, this strategy will work well for mitigating the race. + /// This is also a race that occurs typically on local networks. pending_known: HashMap>, /// Peer data is partially stored here, and partially inline within the [`BlockEntry`]s @@ -947,7 +951,8 @@ impl State { } } - // Invariant: to our knowledge, none of the peers except for the `source` know about the assignment. + // Invariant: to our knowledge, none of the peers except for the `source` know about the + // assignment. metrics.on_assignment_imported(); let topology = self.topologies.get_topology(entry.session); @@ -1239,7 +1244,8 @@ impl State { } } - // Invariant: to our knowledge, none of the peers except for the `source` know about the approval. + // Invariant: to our knowledge, none of the peers except for the `source` know about the + // approval. 
metrics.on_approval_imported(); let required_routing = match entry.candidates.get_mut(candidate_index as usize) { @@ -1925,9 +1931,9 @@ const fn ensure_size_not_zero(size: usize) -> usize { } /// The maximum amount of assignments per batch is 33% of maximum allowed by protocol. -/// This is an arbitrary value. Bumping this up increases the maximum amount of approvals or assignments -/// we send in a single message to peers. Exceeding `MAX_NOTIFICATION_SIZE` will violate the protocol -/// configuration. +/// This is an arbitrary value. Bumping this up increases the maximum amount of approvals or +/// assignments we send in a single message to peers. Exceeding `MAX_NOTIFICATION_SIZE` will violate +/// the protocol configuration. pub const MAX_ASSIGNMENT_BATCH_SIZE: usize = ensure_size_not_zero( MAX_NOTIFICATION_SIZE as usize / std::mem::size_of::<(IndirectAssignmentCert, CandidateIndex)>() / diff --git a/node/network/availability-distribution/src/requester/fetch_task/mod.rs b/node/network/availability-distribution/src/requester/fetch_task/mod.rs index f87e1888bb10..191ee2acd973 100644 --- a/node/network/availability-distribution/src/requester/fetch_task/mod.rs +++ b/node/network/availability-distribution/src/requester/fetch_task/mod.rs @@ -315,7 +315,8 @@ impl RunningTask { continue }, }; - // We drop the span so that the span is not active whilst we validate and store the chunk. + // We drop the span so that the span is not active whilst we validate and store the + // chunk. 
drop(_chunk_recombine_span); let _chunk_validate_and_store_span = span .child("validate-and-store-chunk") diff --git a/node/network/availability-distribution/src/requester/mod.rs b/node/network/availability-distribution/src/requester/mod.rs index e27f40982ae8..446988f7cc0d 100644 --- a/node/network/availability-distribution/src/requester/mod.rs +++ b/node/network/availability-distribution/src/requester/mod.rs @@ -114,8 +114,8 @@ impl Requester { .with_string_tag("leaf", format!("{:?}", leaf.hash)) .with_stage(jaeger::Stage::AvailabilityDistribution); - // Order important! We need to handle activated, prior to deactivated, otherwise we might - // cancel still needed jobs. + // Order important! We need to handle activated, prior to deactivated, otherwise we + // might cancel still needed jobs. self.start_requesting_chunks(ctx, runtime, leaf, &span).await?; } @@ -168,8 +168,8 @@ impl Requester { // any tasks separately. // // The next time the subsystem receives leaf update, some of spawned task will be bumped - // to be live in fresh relay parent, while some might get dropped due to the current leaf - // being deactivated. + // to be live in fresh relay parent, while some might get dropped due to the current + // leaf being deactivated. self.add_cores(ctx, runtime, leaf, leaf_session_index, cores, span).await?; } @@ -177,7 +177,6 @@ impl Requester { } /// Stop requesting chunks for obsolete heads. - /// fn stop_requesting_chunks(&mut self, obsolete_leaves: impl Iterator) { let obsolete_leaves: HashSet<_> = obsolete_leaves.collect(); self.fetches.retain(|_, task| { @@ -226,10 +225,10 @@ impl Requester { .with_session_info( context, runtime, - // We use leaf here, the relay_parent must be in the same session as the - // leaf. This is guaranteed by runtime which ensures that cores are cleared - // at session boundaries. At the same time, only leaves are guaranteed to - // be fetchable by the state trie. 
+ // We use leaf here, the relay_parent must be in the same session as + // the leaf. This is guaranteed by runtime which ensures that cores are + // cleared at session boundaries. At the same time, only leaves are + // guaranteed to be fetchable by the state trie. leaf, leaf_session_index, |info| FetchTaskConfig::new(leaf, &core, tx, metrics, info, span), diff --git a/node/network/availability-recovery/src/futures_undead.rs b/node/network/availability-recovery/src/futures_undead.rs index 225f6693a725..04ef3e749399 100644 --- a/node/network/availability-recovery/src/futures_undead.rs +++ b/node/network/availability-recovery/src/futures_undead.rs @@ -23,7 +23,6 @@ //! was almost done, thus we would have wasted time with our impatience. By simply making them //! not count towards length, we can make sure to have enough "live" requests ongoing, while at the //! same time taking advantage of some maybe "late" response from the undead. -//! use std::{ pin::Pin, diff --git a/node/network/availability-recovery/src/lib.rs b/node/network/availability-recovery/src/lib.rs index e8503ee454a2..fb0cdb720571 100644 --- a/node/network/availability-recovery/src/lib.rs +++ b/node/network/availability-recovery/src/lib.rs @@ -111,7 +111,8 @@ const SMALL_POV_LIMIT: usize = 128 * 1024; pub enum RecoveryStrategy { /// We always try the backing group first, then fallback to validator chunks. BackersFirstAlways, - /// We try the backing group first if PoV size is lower than specified, then fallback to validator chunks. + /// We try the backing group first if PoV size is lower than specified, then fallback to + /// validator chunks. BackersFirstIfSizeLower(usize), /// We always recover using validator chunks. ChunksAlways, @@ -132,7 +133,8 @@ impl RecoveryStrategy { } } - /// Returns the PoV size limit in bytes for `BackersFirstIfSizeLower` strategy, otherwise `None`. + /// Returns the PoV size limit in bytes for `BackersFirstIfSizeLower` strategy, otherwise + /// `None`. 
pub fn pov_size_limit(&self) -> Option { match *self { RecoveryStrategy::BackersFirstIfSizeLower(limit) => Some(limit), @@ -165,8 +167,8 @@ struct RequestChunksFromValidators { /// /// including failed ones. total_received_responses: usize, - /// a random shuffling of the validators which indicates the order in which we connect to the validators and - /// request the chunk from them. + /// a random shuffling of the validators which indicates the order in which we connect to the + /// validators and request the chunk from them. shuffling: VecDeque, /// Chunks received so far. received_chunks: HashMap, @@ -215,7 +217,8 @@ enum ErasureTask { HashMap, oneshot::Sender>, ), - /// Re-encode `AvailableData` into erasure chunks in order to verify the provided root hash of the Merkle tree. + /// Re-encode `AvailableData` into erasure chunks in order to verify the provided root hash of + /// the Merkle tree. Reencode(usize, Hash, AvailableData, oneshot::Sender>), } @@ -808,8 +811,8 @@ where self.params.metrics.on_recovery_started(); loop { - // These only fail if we cannot reach the underlying subsystem, which case there is nothing - // meaningful we can do. + // These only fail if we cannot reach the underlying subsystem, which case there is + // nothing meaningful we can do. match self.source { Source::RequestFromBackers(ref mut from_backers) => { match from_backers.run(&self.params, &mut self.sender).await { @@ -1008,7 +1011,8 @@ async fn launch_recovery_task( ); backing_group = backing_group.filter(|_| { - // We keep the backing group only if `1/3` of chunks sum up to less than `small_pov_limit`. + // We keep the backing group only if `1/3` of chunks sum up to less than + // `small_pov_limit`. prefer_backing_group }); } @@ -1194,18 +1198,21 @@ impl AvailabilityRecoverySubsystem { let (erasure_task_tx, erasure_task_rx) = futures::channel::mpsc::channel(16); let mut erasure_task_rx = erasure_task_rx.fuse(); - // `ThreadPoolBuilder` spawns the tasks using `spawn_blocking`. 
For each worker there will be a `mpsc` channel created. - // Each of these workers take the `Receiver` and poll it in an infinite loop. - // All of the sender ends of the channel are sent as a vec which we then use to create a `Cycle` iterator. - // We use this iterator to assign work in a round-robin fashion to the workers in the pool. + // `ThreadPoolBuilder` spawns the tasks using `spawn_blocking`. For each worker there will + // be a `mpsc` channel created. Each of these workers take the `Receiver` and poll it in an + // infinite loop. All of the sender ends of the channel are sent as a vec which we then use + // to create a `Cycle` iterator. We use this iterator to assign work in a round-robin + // fashion to the workers in the pool. // // How work is dispatched to the pool from the recovery tasks: - // - Once a recovery task finishes retrieving the availability data, it needs to reconstruct from chunks and/or + // - Once a recovery task finishes retrieving the availability data, it needs to reconstruct + // from chunks and/or // re-encode the data which are heavy CPU computations. - // To do so it sends an `ErasureTask` to the main loop via the `erasure_task` channel, and waits for the results - // over a `oneshot` channel. + // To do so it sends an `ErasureTask` to the main loop via the `erasure_task` channel, and + // waits for the results over a `oneshot` channel. // - In the subsystem main loop we poll the `erasure_task_rx` receiver. - // - We forward the received `ErasureTask` to the `next()` sender yielded by the `Cycle` iterator. + // - We forward the received `ErasureTask` to the `next()` sender yielded by the `Cycle` + // iterator. // - Some worker thread handles it and sends the response over the `oneshot` channel. // Create a thread pool with 2 workers. @@ -1348,11 +1355,13 @@ impl ThreadPoolBuilder { // Creates a pool of `size` workers, where 1 <= `size` <= `MAX_THREADS`. 
// // Each worker is created by `spawn_blocking` and takes the receiver side of a channel - // while all of the senders are returned to the caller. Each worker runs `erasure_task_thread` that - // polls the `Receiver` for an `ErasureTask` which is expected to be CPU intensive. The larger - // the input (more or larger chunks/availability data), the more CPU cycles will be spent. + // while all of the senders are returned to the caller. Each worker runs `erasure_task_thread` + // that polls the `Receiver` for an `ErasureTask` which is expected to be CPU intensive. The + // larger the input (more or larger chunks/availability data), the more CPU cycles will be + // spent. // - // For example, for 32KB PoVs, we'd expect re-encode to eat as much as 90ms and 500ms for 2.5MiB. + // For example, for 32KB PoVs, we'd expect re-encode to eat as much as 90ms and 500ms for + // 2.5MiB. // // After executing such a task, the worker sends the response via a provided `oneshot` sender. // diff --git a/node/network/availability-recovery/src/tests.rs b/node/network/availability-recovery/src/tests.rs index 26a99e91a5e2..c5647a12f589 100644 --- a/node/network/availability-recovery/src/tests.rs +++ b/node/network/availability-recovery/src/tests.rs @@ -817,7 +817,8 @@ fn wrong_chunk_index_leads_to_recovery_error() { let candidate_hash = test_state.candidate.hash(); - // These chunks should fail the index check as they don't have the correct index for validator. + // These chunks should fail the index check as they don't have the correct index for + // validator. 
test_state.chunks[1] = test_state.chunks[0].clone(); test_state.chunks[2] = test_state.chunks[0].clone(); test_state.chunks[3] = test_state.chunks[0].clone(); diff --git a/node/network/bridge/src/rx/mod.rs b/node/network/bridge/src/rx/mod.rs index 11a2dc6be83a..950bb3d6e6da 100644 --- a/node/network/bridge/src/rx/mod.rs +++ b/node/network/bridge/src/rx/mod.rs @@ -14,7 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! The Network Bridge Subsystem - handles _incoming_ messages from the network, forwarded to the relevant subsystems. +//! The Network Bridge Subsystem - handles _incoming_ messages from the network, forwarded to the +//! relevant subsystems. use super::*; use always_assert::never; @@ -86,7 +87,8 @@ pub struct NetworkBridgeRx { } impl NetworkBridgeRx { - /// Create a new network bridge subsystem with underlying network service and authority discovery service. + /// Create a new network bridge subsystem with underlying network service and authority + /// discovery service. /// /// This assumes that the network service has had the notifications protocol for the network /// bridge already registered. See [`peers_sets_info`](peers_sets_info). diff --git a/node/network/bridge/src/rx/tests.rs b/node/network/bridge/src/rx/tests.rs index 078f6591ae2a..e18a7e541832 100644 --- a/node/network/bridge/src/rx/tests.rs +++ b/node/network/bridge/src/rx/tests.rs @@ -795,8 +795,9 @@ fn peer_messages_sent_via_overseer() { network_handle.disconnect_peer(peer.clone(), PeerSet::Validation).await; - // Approval distribution message comes first, and the message is only sent to that subsystem. - // then a disconnection event arises that is sent to all validation networking subsystems. + // Approval distribution message comes first, and the message is only sent to that + // subsystem. then a disconnection event arises that is sent to all validation networking + // subsystems. 
assert_matches!( virtual_overseer.recv().await, diff --git a/node/network/bridge/src/tx/mod.rs b/node/network/bridge/src/tx/mod.rs index 2b54f6f0f06d..93916dd70fec 100644 --- a/node/network/bridge/src/tx/mod.rs +++ b/node/network/bridge/src/tx/mod.rs @@ -61,7 +61,8 @@ pub struct NetworkBridgeTx { } impl NetworkBridgeTx { - /// Create a new network bridge subsystem with underlying network service and authority discovery service. + /// Create a new network bridge subsystem with underlying network service and authority + /// discovery service. /// /// This assumes that the network service has had the notifications protocol for the network /// bridge already registered. See [`peers_sets_info`](peers_sets_info). diff --git a/node/network/bridge/src/validator_discovery.rs b/node/network/bridge/src/validator_discovery.rs index 098416c5b88d..d4d1df3da467 100644 --- a/node/network/bridge/src/validator_discovery.rs +++ b/node/network/bridge/src/validator_discovery.rs @@ -106,9 +106,10 @@ impl Service { /// It will ask the network to connect to the validators and not disconnect /// from them at least until the next request is issued for the same peer set. /// - /// This method will also disconnect from previously connected validators not in the `validator_ids` set. - /// it takes `network_service` and `authority_discovery_service` by value - /// and returns them as a workaround for the Future: Send requirement imposed by async function implementation. + /// This method will also disconnect from previously connected validators not in the + /// `validator_ids` set. it takes `network_service` and `authority_discovery_service` by value + /// and returns them as a workaround for the Future: Send requirement imposed by async function + /// implementation. 
pub async fn on_request( &mut self, validator_ids: Vec, diff --git a/node/network/collator-protocol/src/collator_side/mod.rs b/node/network/collator-protocol/src/collator_side/mod.rs index 39b23c152cbb..e4adfdc9d941 100644 --- a/node/network/collator-protocol/src/collator_side/mod.rs +++ b/node/network/collator-protocol/src/collator_side/mod.rs @@ -225,8 +225,8 @@ struct State { /// Our validator groups per active leaf. our_validators_groups: HashMap, - /// The mapping from [`PeerId`] to [`HashSet`]. This is filled over time as we learn the [`PeerId`]'s - /// by `PeerConnected` events. + /// The mapping from [`PeerId`] to [`HashSet`]. This is filled over time + /// as we learn the [`PeerId`]'s by `PeerConnected` events. peer_ids: HashMap>, /// Tracks which validators we want to stay connected to. @@ -241,8 +241,8 @@ struct State { /// All collation fetching requests that are still waiting to be answered. /// - /// They are stored per relay parent, when our view changes and the relay parent moves out, we will cancel the fetch - /// request. + /// They are stored per relay parent, when our view changes and the relay parent moves out, we + /// will cancel the fetch request. waiting_collation_fetches: HashMap, /// Active collation fetches. @@ -526,8 +526,8 @@ async fn connect_to_validators( /// Advertise collation to the given `peer`. /// -/// This will only advertise a collation if there exists one for the given `relay_parent` and the given `peer` is -/// set as validator for our para at the given `relay_parent`. +/// This will only advertise a collation if there exists one for the given `relay_parent` and the +/// given `peer` is set as validator for our para at the given `relay_parent`. 
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)] async fn advertise_collation( ctx: &mut Context, @@ -638,7 +638,8 @@ async fn process_msg( ); }, NetworkBridgeUpdate(event) => { - // We should count only this shoulder in the histogram, as other shoulders are just introducing noise + // We should count only this shoulder in the histogram, as other shoulders are just + // introducing noise let _ = state.metrics.time_process_msg(); if let Err(e) = handle_network_msg(ctx, runtime, state, event).await { diff --git a/node/network/collator-protocol/src/collator_side/tests.rs b/node/network/collator-protocol/src/collator_side/tests.rs index 757ef813a3d0..e406e5d869cc 100644 --- a/node/network/collator-protocol/src/collator_side/tests.rs +++ b/node/network/collator-protocol/src/collator_side/tests.rs @@ -160,8 +160,8 @@ impl TestState { /// Generate a new relay parent and inform the subsystem about the new view. /// - /// If `merge_views == true` it means the subsystem will be informed that we are working on the old `relay_parent` - /// and the new one. + /// If `merge_views == true` it means the subsystem will be informed that we are working on the + /// old `relay_parent` and the new one. async fn advance_to_new_round( &mut self, virtual_overseer: &mut VirtualOverseer, @@ -901,7 +901,8 @@ fn collate_on_two_different_relay_chain_blocks() { let old_relay_parent = test_state.relay_parent; - // Advance to a new round, while informing the subsystem that the old and the new relay parent are active. + // Advance to a new round, while informing the subsystem that the old and the new relay + // parent are active. test_state.advance_to_new_round(virtual_overseer, true).await; distribute_collation(virtual_overseer, &test_state, true).await; @@ -1085,7 +1086,8 @@ where .await .unwrap(); - // Keep the feedback channel alive because we need to use it to inform about the finished transfer. 
+ // Keep the feedback channel alive because we need to use it to inform about the + // finished transfer. let feedback_tx = assert_matches!( rx.await, Ok(full_response) => { diff --git a/node/network/collator-protocol/src/collator_side/validators_buffer.rs b/node/network/collator-protocol/src/collator_side/validators_buffer.rs index 851923a6d0d4..13ed3f66e0f1 100644 --- a/node/network/collator-protocol/src/collator_side/validators_buffer.rs +++ b/node/network/collator-protocol/src/collator_side/validators_buffer.rs @@ -23,9 +23,9 @@ //! We keep a simple FIFO buffer of N validator groups and a bitvec for each advertisement, //! 1 indicating we want to be connected to i-th validator in a buffer, 0 otherwise. //! -//! The bit is set to 1 for the whole **group** whenever it's inserted into the buffer. Given a relay -//! parent, one can reset a bit back to 0 for particular **validator**. For example, if a collation -//! was fetched or some timeout has been hit. +//! The bit is set to 1 for the whole **group** whenever it's inserted into the buffer. Given a +//! relay parent, one can reset a bit back to 0 for particular **validator**. For example, if a +//! collation was fetched or some timeout has been hit. //! //! The bitwise OR over known advertisements gives us validators indices for connection request. diff --git a/node/network/collator-protocol/src/validator_side/tests.rs b/node/network/collator-protocol/src/validator_side/tests.rs index a2e92e8c78d2..47409e8d10f3 100644 --- a/node/network/collator-protocol/src/validator_side/tests.rs +++ b/node/network/collator-protocol/src/validator_side/tests.rs @@ -730,7 +730,8 @@ fn reject_connection_to_next_group() { }) } -// Ensure that we fetch a second collation, after the first checked collation was found to be invalid. +// Ensure that we fetch a second collation, after the first checked collation was found to be +// invalid. 
#[test] fn fetch_next_collation_on_invalid_collation() { let test_state = TestState::default(); diff --git a/node/network/dispute-distribution/src/lib.rs b/node/network/dispute-distribution/src/lib.rs index a39f78358f44..ad99bc41fa64 100644 --- a/node/network/dispute-distribution/src/lib.rs +++ b/node/network/dispute-distribution/src/lib.rs @@ -60,8 +60,8 @@ use self::sender::{DisputeSender, DisputeSenderMessage}; /// ## The receiver [`DisputesReceiver`] /// -/// The receiving side is implemented as `DisputesReceiver` and is run as a separate long running task within -/// this subsystem ([`DisputesReceiver::run`]). +/// The receiving side is implemented as `DisputesReceiver` and is run as a separate long running +/// task within this subsystem ([`DisputesReceiver::run`]). /// /// Conceptually all the receiver has to do, is waiting for incoming requests which are passed in /// via a dedicated channel and forwarding them to the dispute coordinator via @@ -101,8 +101,8 @@ const LOG_TARGET: &'static str = "parachain::dispute-distribution"; /// Rate limit on the `receiver` side. /// -/// If messages from one peer come in at a higher rate than every `RECEIVE_RATE_LIMIT` on average, we -/// start dropping messages from that peer to enforce that limit. +/// If messages from one peer come in at a higher rate than every `RECEIVE_RATE_LIMIT` on average, +/// we start dropping messages from that peer to enforce that limit. pub const RECEIVE_RATE_LIMIT: Duration = Duration::from_millis(100); /// Rate limit on the `sender` side. diff --git a/node/network/dispute-distribution/src/receiver/batches/batch.rs b/node/network/dispute-distribution/src/receiver/batches/batch.rs index 75f37107dff9..11380b7c072e 100644 --- a/node/network/dispute-distribution/src/receiver/batches/batch.rs +++ b/node/network/dispute-distribution/src/receiver/batches/batch.rs @@ -192,8 +192,8 @@ impl Batch { /// Calculate when the next tick should happen. 
/// - /// This will usually return `now + BATCH_COLLECTING_INTERVAL`, except if the lifetime of this batch - /// would exceed `MAX_BATCH_LIFETIME`. + /// This will usually return `now + BATCH_COLLECTING_INTERVAL`, except if the lifetime of this + /// batch would exceed `MAX_BATCH_LIFETIME`. /// /// # Arguments /// diff --git a/node/network/dispute-distribution/src/receiver/batches/waiting_queue.rs b/node/network/dispute-distribution/src/receiver/batches/waiting_queue.rs index 72f6e80a26a4..9a5e665a5756 100644 --- a/node/network/dispute-distribution/src/receiver/batches/waiting_queue.rs +++ b/node/network/dispute-distribution/src/receiver/batches/waiting_queue.rs @@ -50,8 +50,8 @@ impl WaitingQueue { /// Push a `PendingWake`. /// - /// The next call to `wait_ready` will make sure to wake soon enough to process that new event in a - /// timely manner. + /// The next call to `wait_ready` will make sure to wake soon enough to process that new event + /// in a timely manner. pub fn push(&mut self, wake: PendingWake) { self.pending_wakes.push(wake); // Reset timer as it is potentially obsolete now: diff --git a/node/network/dispute-distribution/src/receiver/mod.rs b/node/network/dispute-distribution/src/receiver/mod.rs index ed108a67fac3..827a77281ccb 100644 --- a/node/network/dispute-distribution/src/receiver/mod.rs +++ b/node/network/dispute-distribution/src/receiver/mod.rs @@ -382,11 +382,11 @@ where if let Err(pending_response) = batch_result { // We don't expect honest peers to send redundant votes within a single batch, // as the timeout for retry is much higher. Still we don't want to punish the - // node as it might not be the node's fault. Some other (malicious) node could have been - // faster sending the same votes in order to harm the reputation of that honest - // node. Given that we already have a rate limit, if a validator chooses to - // waste available rate with redundant votes - so be it. The actual dispute - // resolution is unaffected. 
+ // node as it might not be the node's fault. Some other (malicious) node could + // have been faster sending the same votes in order to harm the reputation of + // that honest node. Given that we already have a rate limit, if a validator + // chooses to waste available rate with redundant votes - so be it. The actual + // dispute resolution is unaffected. gum::debug!( target: LOG_TARGET, ?peer, diff --git a/node/network/dispute-distribution/src/sender/send_task.rs b/node/network/dispute-distribution/src/sender/send_task.rs index fcd670ff9ce9..18c66066d162 100644 --- a/node/network/dispute-distribution/src/sender/send_task.rs +++ b/node/network/dispute-distribution/src/sender/send_task.rs @@ -45,8 +45,8 @@ use crate::{ /// /// The unit of work for a `SendTask` is an authority/validator. pub struct SendTask { - /// The request we are supposed to get out to all `parachain` validators of the dispute's session - /// and to all current authorities. + /// The request we are supposed to get out to all `parachain` validators of the dispute's + /// session and to all current authorities. request: DisputeRequest, /// The set of authorities we need to send our messages to. This set will change at session @@ -185,7 +185,8 @@ impl SendTask { /// Handle a finished response waiting task. /// - /// Called by `DisputeSender` upon reception of the corresponding message from our spawned `wait_response_task`. + /// Called by `DisputeSender` upon reception of the corresponding message from our spawned + /// `wait_response_task`. pub fn on_finished_send(&mut self, authority: &AuthorityDiscoveryId, result: TaskResult) { match result { TaskResult::Failed(err) => { @@ -204,8 +205,8 @@ impl SendTask { TaskResult::Succeeded => { let status = match self.deliveries.get_mut(&authority) { None => { - // Can happen when a sending became irrelevant while the response was already - // queued. + // Can happen when a sending became irrelevant while the response was + // already queued. 
gum::debug!( target: LOG_TARGET, candidate = ?self.request.0.candidate_receipt.hash(), diff --git a/node/network/gossip-support/src/lib.rs b/node/network/gossip-support/src/lib.rs index 62a071aa6f4c..3c178ad9dfa5 100644 --- a/node/network/gossip-support/src/lib.rs +++ b/node/network/gossip-support/src/lib.rs @@ -246,7 +246,8 @@ where { let mut connections = authorities_past_present_future(sender, leaf).await?; - // Remove all of our locally controlled validator indices so we don't connect to ourself. + // Remove all of our locally controlled validator indices so we don't connect to + // ourself. let connections = if remove_all_controlled(&self.keystore, &mut connections) != 0 { connections diff --git a/node/network/protocol/src/grid_topology.rs b/node/network/protocol/src/grid_topology.rs index 1b356f67617b..99dd513c4d79 100644 --- a/node/network/protocol/src/grid_topology.rs +++ b/node/network/protocol/src/grid_topology.rs @@ -17,17 +17,20 @@ //! Grid topology support implementation //! The basic operation of the 2D grid topology is that: //! * A validator producing a message sends it to its row-neighbors and its column-neighbors -//! * A validator receiving a message originating from one of its row-neighbors sends it to its column-neighbors -//! * A validator receiving a message originating from one of its column-neighbors sends it to its row-neighbors +//! * A validator receiving a message originating from one of its row-neighbors sends it to its +//! column-neighbors +//! * A validator receiving a message originating from one of its column-neighbors sends it to its +//! row-neighbors //! -//! This grid approach defines 2 unique paths for every validator to reach every other validator in at most 2 hops. +//! This grid approach defines 2 unique paths for every validator to reach every other validator in +//! at most 2 hops. //! //! However, we also supplement this with some degree of random propagation: //! 
every validator, upon seeing a message for the first time, propagates it to 8 random peers. //! This inserts some redundancy in case the grid topology isn't working or is being attacked - //! an adversary doesn't know which peers a validator will send to. -//! This is combined with the property that the adversary doesn't know which validators will elect to check a block. -//! +//! This is combined with the property that the adversary doesn't know which validators will elect +//! to check a block. use crate::PeerId; use polkadot_primitives::{AuthorityDiscoveryId, SessionIndex, ValidatorIndex}; @@ -188,7 +191,8 @@ impl GridNeighbors { (false, false) => RequiredRouting::None, (true, false) => RequiredRouting::GridY, // messages from X go to Y (false, true) => RequiredRouting::GridX, // messages from Y go to X - (true, true) => RequiredRouting::GridXY, // if the grid works as expected, this shouldn't happen. + (true, true) => RequiredRouting::GridXY, /* if the grid works as expected, this + * shouldn't happen. */ } } @@ -213,7 +217,8 @@ impl GridNeighbors { "Grid topology is unexpected, play it safe and send to X AND Y" ); RequiredRouting::GridXY - }, // if the grid works as expected, this shouldn't happen. + }, /* if the grid works as expected, this + * shouldn't happen. */ } } diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index 948c422a82f8..2df926ac55d8 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -91,7 +91,8 @@ impl Into for ObservedRole { /// Specialized wrapper around [`View`]. /// -/// Besides the access to the view itself, it also gives access to the [`jaeger::Span`] per leave/head. +/// Besides the access to the view itself, it also gives access to the [`jaeger::Span`] per +/// leave/head. 
#[derive(Debug, Clone, Default)] pub struct OurView { view: View, @@ -131,7 +132,8 @@ impl std::ops::Deref for OurView { } } -/// Construct a new [`OurView`] with the given chain heads, finalized number 0 and disabled [`jaeger::Span`]'s. +/// Construct a new [`OurView`] with the given chain heads, finalized number 0 and disabled +/// [`jaeger::Span`]'s. /// /// NOTE: Use for tests only. /// diff --git a/node/network/protocol/src/peer_set.rs b/node/network/protocol/src/peer_set.rs index ce47ac30811a..b9fa80d5c4a2 100644 --- a/node/network/protocol/src/peer_set.rs +++ b/node/network/protocol/src/peer_set.rs @@ -98,7 +98,8 @@ impl PeerSet { max_notification_size, handshake: None, set_config: SetConfig { - // Non-authority nodes don't need to accept incoming connections on this peer set: + // Non-authority nodes don't need to accept incoming connections on this peer + // set: in_peers: if is_authority == IsAuthority::Yes { 100 } else { 0 }, out_peers: 0, reserved_nodes: Vec::new(), diff --git a/node/network/protocol/src/request_response/incoming/mod.rs b/node/network/protocol/src/request_response/incoming/mod.rs index e2b8ad526488..445544838672 100644 --- a/node/network/protocol/src/request_response/incoming/mod.rs +++ b/node/network/protocol/src/request_response/incoming/mod.rs @@ -78,8 +78,8 @@ where /// reputation changes in that case. /// /// Params: - /// - The raw request to decode - /// - Reputation changes to apply for the peer in case decoding fails. + /// - The raw request to decode + /// - Reputation changes to apply for the peer in case decoding fails. 
fn try_from_raw( raw: sc_network::config::IncomingRequest, reputation_changes: Vec, diff --git a/node/network/protocol/src/request_response/mod.rs b/node/network/protocol/src/request_response/mod.rs index d895a90079cc..912447c0c626 100644 --- a/node/network/protocol/src/request_response/mod.rs +++ b/node/network/protocol/src/request_response/mod.rs @@ -110,9 +110,9 @@ pub const MAX_PARALLEL_STATEMENT_REQUESTS: u32 = 3; /// Response size limit for responses of POV like data. /// /// This is larger than `MAX_POV_SIZE` to account for protocol overhead and for additional data in -/// `CollationFetchingV1` or `AvailableDataFetchingV1` for example. We try to err on larger limits here -/// as a too large limit only allows an attacker to waste our bandwidth some more, a too low limit -/// might have more severe effects. +/// `CollationFetchingV1` or `AvailableDataFetchingV1` for example. We try to err on larger limits +/// here as a too large limit only allows an attacker to waste our bandwidth some more, a too low +/// limit might have more severe effects. const POV_RESPONSE_SIZE: u64 = MAX_POV_SIZE as u64 + 10_000; /// Maximum response sizes for `StatementFetchingV1`. diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 160132011589..4cdf0d8af467 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -185,8 +185,8 @@ struct VcPerPeerTracker { } impl VcPerPeerTracker { - /// Note that the remote should now be aware that a validator has seconded a given candidate (by hash) - /// based on a message that we have sent it from our local pool. + /// Note that the remote should now be aware that a validator has seconded a given candidate (by + /// hash) based on a message that we have sent it from our local pool. 
fn note_local(&mut self, h: CandidateHash) { if !note_hash(&mut self.local_observed, h) { gum::warn!( @@ -198,8 +198,8 @@ impl VcPerPeerTracker { } } - /// Note that the remote should now be aware that a validator has seconded a given candidate (by hash) - /// based on a message that it has sent us. + /// Note that the remote should now be aware that a validator has seconded a given candidate (by + /// hash) based on a message that it has sent us. /// /// Returns `true` if the peer was allowed to send us such a message, `false` otherwise. fn note_remote(&mut self, h: CandidateHash) -> bool { @@ -226,8 +226,8 @@ fn note_hash( /// knowledge that a peer has about goings-on in a relay parent. #[derive(Default)] struct PeerRelayParentKnowledge { - /// candidates that the peer is aware of because we sent statements to it. This indicates that we can - /// send other statements pertaining to that candidate. + /// candidates that the peer is aware of because we sent statements to it. This indicates that + /// we can send other statements pertaining to that candidate. sent_candidates: HashSet, /// candidates that peer is aware of, because we received statements from it. received_candidates: HashSet, @@ -321,13 +321,13 @@ impl PeerRelayParentKnowledge { } } - /// Attempt to update our view of the peer's knowledge with this statement's fingerprint based on - /// a message we are receiving from the peer. + /// Attempt to update our view of the peer's knowledge with this statement's fingerprint based + /// on a message we are receiving from the peer. /// /// Provide the maximum message count that we can receive per candidate. In practice we should - /// not receive more statements for any one candidate than there are members in the group assigned - /// to that para, but this maximum needs to be lenient to account for equivocations that may be - /// cross-group. As such, a maximum of 2 * `n_validators` is recommended. 
+ /// not receive more statements for any one candidate than there are members in the group + /// assigned to that para, but this maximum needs to be lenient to account for equivocations + /// that may be cross-group. As such, a maximum of 2 * `n_validators` is recommended. /// /// This returns an error if the peer should not have sent us this message according to protocol /// rules for flood protection. @@ -490,13 +490,13 @@ impl PeerData { self.view_knowledge.get(relay_parent).map_or(false, |k| k.can_send(fingerprint)) } - /// Attempt to update our view of the peer's knowledge with this statement's fingerprint based on - /// a message we are receiving from the peer. + /// Attempt to update our view of the peer's knowledge with this statement's fingerprint based + /// on a message we are receiving from the peer. /// /// Provide the maximum message count that we can receive per candidate. In practice we should - /// not receive more statements for any one candidate than there are members in the group assigned - /// to that para, but this maximum needs to be lenient to account for equivocations that may be - /// cross-group. As such, a maximum of 2 * `n_validators` is recommended. + /// not receive more statements for any one candidate than there are members in the group + /// assigned to that para, but this maximum needs to be lenient to account for equivocations + /// that may be cross-group. As such, a maximum of 2 * `n_validators` is recommended. /// /// This returns an error if the peer should not have sent us this message according to protocol /// rules for flood protection. @@ -600,8 +600,8 @@ enum NotedStatement<'a> { /// Large statement fetching status. enum LargeStatementStatus { - /// We are currently fetching the statement data from a remote peer. We keep a list of other nodes - /// claiming to have that data and will fallback on them. + /// We are currently fetching the statement data from a remote peer. 
We keep a list of other + /// nodes claiming to have that data and will fallback on them. Fetching(FetchingInfo), /// Statement data is fetched or we got it locally via `StatementDistributionMessage::Share`. FetchedOrShared(CommittedCandidateReceipt), @@ -712,8 +712,8 @@ impl ActiveHeadData { /// to have been checked, including that the validator index is not out-of-bounds and /// the signature is valid. /// - /// Any other statements or those that reference a candidate we are not aware of cannot be accepted - /// and will return `NotedStatement::NotUseful`. + /// Any other statements or those that reference a candidate we are not aware of cannot be + /// accepted and will return `NotedStatement::NotUseful`. fn note_statement(&mut self, statement: SignedFullStatement) -> NotedStatement { let validator_index = statement.validator_index(); let comparator = StoredStatementComparator { @@ -1272,9 +1272,9 @@ async fn retrieve_statement_from_message<'a, Context>( } }, protocol_v1::StatementDistributionMessage::Statement(_, s) => { - // No fetch in progress, safe to return any statement immediately (we don't bother - // about normal network jitter which might cause `Valid` statements to arrive early - // for now.). + // No fetch in progress, safe to return any statement immediately (we don't + // bother about normal network jitter which might cause `Valid` statements to + // arrive early for now.). 
return Some(s) }, } @@ -1470,7 +1470,8 @@ async fn handle_incoming_message<'a, Context>( ); match rep { - // This happens when a Valid statement has been received but there is no corresponding Seconded + // This happens when a Valid statement has been received but there is no corresponding + // Seconded COST_UNEXPECTED_STATEMENT_UNKNOWN_CANDIDATE => { metrics.on_unexpected_statement_valid(); // Report peer merely if this is not a duplicate out-of-view statement that diff --git a/node/network/statement-distribution/src/tests.rs b/node/network/statement-distribution/src/tests.rs index 3f3e6e589616..62167f77a1e0 100644 --- a/node/network/statement-distribution/src/tests.rs +++ b/node/network/statement-distribution/src/tests.rs @@ -824,8 +824,8 @@ fn receiving_from_one_sends_to_another_and_to_candidate_backing() { }) .await; - // receive a seconded statement from peer A. it should be propagated onwards to peer B and to - // candidate backing. + // receive a seconded statement from peer A. it should be propagated onwards to peer B and + // to candidate backing. let statement = { let signing_context = SigningContext { parent_hash: hash_a, session_index }; @@ -2536,8 +2536,8 @@ fn handle_multiple_seconded_statements() { }) .await; - // receive a seconded statement from peer A. it should be propagated onwards to peer B and to - // candidate backing. + // receive a seconded statement from peer A. it should be propagated onwards to peer B and + // to candidate backing. let statement = { let signing_context = SigningContext { parent_hash: relay_parent_hash, session_index }; diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index a2d553779fdc..ebf33d5247b1 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -211,10 +211,10 @@ impl Handle { /// Wait for a block with the given hash to be in the active-leaves set. /// - /// The response channel responds if the hash was activated and is closed if the hash was deactivated. 
- /// Note that due the fact the overseer doesn't store the whole active-leaves set, only deltas, - /// the response channel may never return if the hash was deactivated before this call. - /// In this case, it's the caller's responsibility to ensure a timeout is set. + /// The response channel responds if the hash was activated and is closed if the hash was + /// deactivated. Note that due the fact the overseer doesn't store the whole active-leaves set, + /// only deltas, the response channel may never return if the hash was deactivated before this + /// call. In this case, it's the caller's responsibility to ensure a timeout is set. pub async fn wait_for_activation( &mut self, hash: Hash, @@ -355,7 +355,6 @@ pub async fn forward_events>(client: Arc

, mut hand /// +-----------+ /// | | /// +-----------+ -/// /// ``` /// /// [`Subsystem`]: trait.Subsystem.html @@ -363,8 +362,8 @@ pub async fn forward_events>(client: Arc

, mut hand /// # Example /// /// The [`Subsystems`] may be any type as long as they implement an expected interface. -/// Here, we create a mock validation subsystem and a few dummy ones and start the `Overseer` with them. -/// For the sake of simplicity the termination of the example is done with a timeout. +/// Here, we create a mock validation subsystem and a few dummy ones and start the `Overseer` with +/// them. For the sake of simplicity the termination of the example is done with a timeout. /// ``` /// # use std::time::Duration; /// # use futures::{executor, pin_mut, select, FutureExt}; @@ -394,11 +393,11 @@ pub async fn forward_events>(client: Arc

, mut hand /// impl overseer::Subsystem for ValidationSubsystem /// where /// Ctx: overseer::SubsystemContext< -/// Message=CandidateValidationMessage, -/// AllMessages=AllMessages, -/// Signal=OverseerSignal, -/// Error=SubsystemError, -/// >, +/// Message=CandidateValidationMessage, +/// AllMessages=AllMessages, +/// Signal=OverseerSignal, +/// Error=SubsystemError, +/// >, /// { /// fn start( /// self, @@ -426,10 +425,10 @@ pub async fn forward_events>(client: Arc

, mut hand /// /// let spawner = sp_core::testing::TaskExecutor::new(); /// let (overseer, _handle) = dummy_overseer_builder(spawner, AlwaysSupportsParachains, None) -/// .unwrap() -/// .replace_candidate_validation(|_| ValidationSubsystem) -/// .build() -/// .unwrap(); +/// .unwrap() +/// .replace_candidate_validation(|_| ValidationSubsystem) +/// .build() +/// .unwrap(); /// /// let timer = Delay::new(Duration::from_millis(50)).fuse(); /// @@ -825,7 +824,8 @@ where // If there are no leaves being deactivated, we don't need to send an update. // - // Our peers will be informed about our finalized block the next time we activating/deactivating some leaf. + // Our peers will be informed about our finalized block the next time we + // activating/deactivating some leaf. if !update.is_empty() { self.broadcast_signal(OverseerSignal::ActiveLeaves(update)).await?; } diff --git a/node/primitives/src/disputes/message.rs b/node/primitives/src/disputes/message.rs index 992d70ba1324..89d3ea6c0af9 100644 --- a/node/primitives/src/disputes/message.rs +++ b/node/primitives/src/disputes/message.rs @@ -105,8 +105,8 @@ impl DisputeMessage { /// - the invalid statement is indeed an invalid one /// - the valid statement is indeed a valid one /// - The passed `CandidateReceipt` has the correct hash (as signed in the statements). - /// - the given validator indices match with the given `ValidatorId`s in the statements, - /// given a `SessionInfo`. + /// - the given validator indices match with the given `ValidatorId`s in the statements, given a + /// `SessionInfo`. /// /// We don't check whether the given `SessionInfo` matches the `SessionIndex` in the /// statements, because we can't without doing a runtime query. 
Nevertheless this smart diff --git a/node/primitives/src/disputes/status.rs b/node/primitives/src/disputes/status.rs index 309225edc94b..d93c3ec846ce 100644 --- a/node/primitives/src/disputes/status.rs +++ b/node/primitives/src/disputes/status.rs @@ -16,7 +16,8 @@ use parity_scale_codec::{Decode, Encode}; -/// Timestamp based on the 1 Jan 1970 UNIX base, which is persistent across node restarts and OS reboots. +/// Timestamp based on the 1 Jan 1970 UNIX base, which is persistent across node restarts and OS +/// reboots. pub type Timestamp = u64; /// The status of dispute. @@ -88,8 +89,8 @@ impl DisputeStatus { } } - /// Transition the status to a new status after observing the dispute has concluded for the candidate. - /// This may be a no-op if the status was already concluded. + /// Transition the status to a new status after observing the dispute has concluded for the + /// candidate. This may be a no-op if the status was already concluded. pub fn conclude_for(self, now: Timestamp) -> DisputeStatus { match self { DisputeStatus::Active | DisputeStatus::Confirmed => DisputeStatus::ConcludedFor(now), @@ -98,8 +99,8 @@ impl DisputeStatus { } } - /// Transition the status to a new status after observing the dispute has concluded against the candidate. - /// This may be a no-op if the status was already concluded. + /// Transition the status to a new status after observing the dispute has concluded against the + /// candidate. This may be a no-op if the status was already concluded. pub fn conclude_against(self, now: Timestamp) -> DisputeStatus { match self { DisputeStatus::Active | DisputeStatus::Confirmed => diff --git a/node/primitives/src/lib.rs b/node/primitives/src/lib.rs index 1177dbc17caa..d49cd806d54e 100644 --- a/node/primitives/src/lib.rs +++ b/node/primitives/src/lib.rs @@ -180,8 +180,8 @@ impl std::fmt::Debug for Statement { impl Statement { /// Get the candidate hash referenced by this statement. 
/// - /// If this is a `Statement::Seconded`, this does hash the candidate receipt, which may be expensive - /// for large candidates. + /// If this is a `Statement::Seconded`, this does hash the candidate receipt, which may be + /// expensive for large candidates. pub fn candidate_hash(&self) -> CandidateHash { match *self { Statement::Valid(ref h) => *h, @@ -215,8 +215,8 @@ impl EncodeAs for Statement { /// /// Signing context and validator set should be apparent from context. /// -/// This statement is "full" in the sense that the `Seconded` variant includes the candidate receipt. -/// Only the compact `SignedStatement` is suitable for submission to the chain. +/// This statement is "full" in the sense that the `Seconded` variant includes the candidate +/// receipt. Only the compact `SignedStatement` is suitable for submission to the chain. pub type SignedFullStatement = Signed; /// Variant of `SignedFullStatement` where the signature has not yet been verified. @@ -256,8 +256,8 @@ pub enum InvalidCandidate { /// Result of the validation of the candidate. #[derive(Debug)] pub enum ValidationResult { - /// Candidate is valid. The validation process yields these outputs and the persisted validation - /// data used to form inputs. + /// Candidate is valid. The validation process yields these outputs and the persisted + /// validation data used to form inputs. Valid(CandidateCommitments, PersistedValidationData), /// Candidate is invalid. Invalid(InvalidCandidate), @@ -321,7 +321,8 @@ pub struct Collation { pub proof_of_validity: MaybeCompressedPoV, /// The number of messages processed from the DMQ. pub processed_downward_messages: u32, - /// The mark which specifies the block number up to which all inbound HRMP messages are processed. + /// The mark which specifies the block number up to which all inbound HRMP messages are + /// processed. 
pub hrmp_watermark: BlockNumber, } @@ -344,9 +345,9 @@ pub struct CollationResult { pub collation: Collation, /// An optional result sender that should be informed about a successfully seconded collation. /// - /// There is no guarantee that this sender is informed ever about any result, it is completely okay to just drop it. - /// However, if it is called, it should be called with the signed statement of a parachain validator seconding the - /// collation. + /// There is no guarantee that this sender is informed ever about any result, it is completely + /// okay to just drop it. However, if it is called, it should be called with the signed + /// statement of a parachain validator seconding the collation. pub result_sender: Option>, } @@ -362,8 +363,9 @@ impl CollationResult { /// Collation function. /// -/// Will be called with the hash of the relay chain block the parachain block should be build on and the -/// [`ValidationData`] that provides information about the state of the parachain on the relay chain. +/// Will be called with the hash of the relay chain block the parachain block should be build on and +/// the [`ValidationData`] that provides information about the state of the parachain on the relay +/// chain. /// /// Returns an optional [`CollationResult`]. 
#[cfg(not(target_os = "unknown"))] diff --git a/node/service/src/chain_spec.rs b/node/service/src/chain_spec.rs index a9e6b45f3b2d..7aabfa6e9185 100644 --- a/node/service/src/chain_spec.rs +++ b/node/service/src/chain_spec.rs @@ -529,11 +529,12 @@ fn kusama_staging_testnet_config_genesis(wasm_binary: &[u8]) -> kusama::RuntimeG hex!["12b782529c22032ed4694e0f6e7d486be7daa6d12088f6bc74d593b3900b8438"].into(), ]; - // for i in 1 2 3 4; do for j in stash controller; do subkey inspect "$SECRET//$i//$j"; done; done - // for i in 1 2 3 4; do for j in babe; do subkey --sr25519 inspect "$SECRET//$i//$j"; done; done - // for i in 1 2 3 4; do for j in grandpa; do subkey --ed25519 inspect "$SECRET//$i//$j"; done; done - // for i in 1 2 3 4; do for j in im_online; do subkey --sr25519 inspect "$SECRET//$i//$j"; done; done - // for i in 1 2 3 4; do for j in para_validator para_assignment; do subkey --sr25519 inspect "$SECRET//$i//$j"; done; done + // for i in 1 2 3 4; do for j in stash controller; do subkey inspect "$SECRET//$i//$j"; done; + // done for i in 1 2 3 4; do for j in babe; do subkey --sr25519 inspect "$SECRET//$i//$j"; done; + // done for i in 1 2 3 4; do for j in grandpa; do subkey --ed25519 inspect "$SECRET//$i//$j"; + // done; done for i in 1 2 3 4; do for j in im_online; do subkey --sr25519 inspect + // "$SECRET//$i//$j"; done; done for i in 1 2 3 4; do for j in para_validator para_assignment; + // do subkey --sr25519 inspect "$SECRET//$i//$j"; done; done let initial_authorities: Vec<( AccountId, AccountId, diff --git a/node/service/src/fake_runtime_api.rs b/node/service/src/fake_runtime_api.rs index b322114cbb75..d9553afa024b 100644 --- a/node/service/src/fake_runtime_api.rs +++ b/node/service/src/fake_runtime_api.rs @@ -16,7 +16,8 @@ //! Provides "fake" runtime API implementations //! -//! These are used to provide a type that implements these runtime APIs without requiring to import the native runtimes. +//! 
These are used to provide a type that implements these runtime APIs without requiring to import +//! the native runtimes. use beefy_primitives::ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}; use grandpa_primitives::AuthorityId as GrandpaId; diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index fa8cb8ec77f7..4dda57110825 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -696,9 +696,10 @@ pub const AVAILABILITY_CONFIG: AvailabilityConfig = AvailabilityConfig { /// This is an advanced feature and not recommended for general use. Generally, `build_full` is /// a better choice. /// -/// `overseer_enable_anyways` always enables the overseer, based on the provided `OverseerGenerator`, -/// regardless of the role the node has. The relay chain selection (longest or disputes-aware) is -/// still determined based on the role of the node. Likewise for authority discovery. +/// `overseer_enable_anyways` always enables the overseer, based on the provided +/// `OverseerGenerator`, regardless of the role the node has. The relay chain selection (longest or +/// disputes-aware) is still determined based on the role of the node. Likewise for authority +/// discovery. /// /// `workers_path` is used to get the path to the directory where auxiliary worker binaries reside. /// If not specified, the main binary's directory is searched first, then `/usr/lib/polkadot` is @@ -1331,9 +1332,10 @@ pub fn new_chain_ops( /// The actual "flavor", aka if it will use `Polkadot`, `Rococo` or `Kusama` is determined based on /// [`IdentifyVariant`] using the chain spec. /// -/// `overseer_enable_anyways` always enables the overseer, based on the provided `OverseerGenerator`, -/// regardless of the role the node has. The relay chain selection (longest or disputes-aware) is -/// still determined based on the role of the node. Likewise for authority discovery. 
+/// `overseer_enable_anyways` always enables the overseer, based on the provided +/// `OverseerGenerator`, regardless of the role the node has. The relay chain selection (longest or +/// disputes-aware) is still determined based on the role of the node. Likewise for authority +/// discovery. #[cfg(feature = "full-node")] pub fn build_full( config: Configuration, diff --git a/node/service/src/relay_chain_selection.rs b/node/service/src/relay_chain_selection.rs index afc0ce320610..189073783f0d 100644 --- a/node/service/src/relay_chain_selection.rs +++ b/node/service/src/relay_chain_selection.rs @@ -472,8 +472,8 @@ where let lag = initial_leaf_number.saturating_sub(subchain_number); self.metrics.note_approval_checking_finality_lag(lag); - // Messages sent to `approval-distrbution` are known to have high `ToF`, we need to spawn a task for sending - // the message to not block here and delay finality. + // Messages sent to `approval-distrbution` are known to have high `ToF`, we need to spawn a + // task for sending the message to not block here and delay finality. if let Some(spawn_handle) = &self.spawn_handle { let mut overseer_handle = self.overseer.clone(); let lag_update_task = async move { @@ -537,9 +537,10 @@ where error = ?e, "Call to `DetermineUndisputedChain` failed", ); - // We need to return a sane finality target. But, we are unable to ensure we are not - // finalizing something that is being disputed or has been concluded as invalid. We will be - // conservative here and not vote for finality above the ancestor passed in. + // We need to return a sane finality target. But, we are unable to ensure we + // are not finalizing something that is being disputed or has been concluded + // as invalid. We will be conservative here and not vote for finality above + // the ancestor passed in. 
return Ok(target_hash) }, }; diff --git a/node/service/src/tests.rs b/node/service/src/tests.rs index 424af4d22a26..95d5765bad45 100644 --- a/node/service/src/tests.rs +++ b/node/service/src/tests.rs @@ -498,8 +498,8 @@ struct CaseVars { /// ```raw /// genesis -- 0xA1 --- 0xA2 --- 0xA3 --- 0xA4(!avail) --- 0xA5(!avail) -/// \ -/// `- 0xB2 +/// \ +/// `- 0xB2 /// ``` fn chain_undisputed() -> CaseVars { let head: Hash = ChainBuilder::GENESIS_HASH; @@ -529,8 +529,8 @@ fn chain_undisputed() -> CaseVars { /// ```raw /// genesis -- 0xA1 --- 0xA2 --- 0xA3(disputed) --- 0xA4(!avail) --- 0xA5(!avail) -/// \ -/// `- 0xB2 +/// \ +/// `- 0xB2 /// ``` fn chain_0() -> CaseVars { let head: Hash = ChainBuilder::GENESIS_HASH; @@ -560,8 +560,8 @@ fn chain_0() -> CaseVars { /// ```raw /// genesis -- 0xA1 --- 0xA2(disputed) --- 0xA3 -/// \ -/// `- 0xB2 --- 0xB3(!available) +/// \ +/// `- 0xB2 --- 0xB3(!available) /// ``` fn chain_1() -> CaseVars { let head: Hash = ChainBuilder::GENESIS_HASH; @@ -588,8 +588,8 @@ fn chain_1() -> CaseVars { /// ```raw /// genesis -- 0xA1 --- 0xA2(disputed) --- 0xA3 -/// \ -/// `- 0xB2 --- 0xB3 +/// \ +/// `- 0xB2 --- 0xB3 /// ``` fn chain_2() -> CaseVars { let head: Hash = ChainBuilder::GENESIS_HASH; @@ -616,8 +616,8 @@ fn chain_2() -> CaseVars { /// ```raw /// genesis -- 0xA1 --- 0xA2 --- 0xA3(disputed) -/// \ -/// `- 0xB2 --- 0xB3 +/// \ +/// `- 0xB2 --- 0xB3 /// ``` fn chain_3() -> CaseVars { let head: Hash = ChainBuilder::GENESIS_HASH; @@ -644,10 +644,10 @@ fn chain_3() -> CaseVars { /// ```raw /// genesis -- 0xA1 --- 0xA2 --- 0xA3(disputed) -/// \ -/// `- 0xB2 --- 0xB3 +/// \ +/// `- 0xB2 --- 0xB3 /// -/// ? --- NEX(does_not_exist) +/// ? 
--- NEX(does_not_exist) /// ``` fn chain_4() -> CaseVars { let head: Hash = ChainBuilder::GENESIS_HASH; diff --git a/node/subsystem-test-helpers/src/lib.rs b/node/subsystem-test-helpers/src/lib.rs index 4170f22c5b86..fb908278aa7d 100644 --- a/node/subsystem-test-helpers/src/lib.rs +++ b/node/subsystem-test-helpers/src/lib.rs @@ -310,7 +310,8 @@ pub fn make_buffered_subsystem_context( /// Test a subsystem, mocking the overseer /// -/// Pass in two async closures: one mocks the overseer, the other runs the test from the perspective of a subsystem. +/// Pass in two async closures: one mocks the overseer, the other runs the test from the perspective +/// of a subsystem. /// /// Times out in 5 seconds. pub fn subsystem_test_harness( diff --git a/node/subsystem-types/src/lib.rs b/node/subsystem-types/src/lib.rs index 88c7165bcd80..f438a09592c1 100644 --- a/node/subsystem-types/src/lib.rs +++ b/node/subsystem-types/src/lib.rs @@ -82,8 +82,8 @@ pub struct ActivatedLeaf { pub status: LeafStatus, /// An associated [`jaeger::Span`]. /// - /// NOTE: Each span should only be kept active as long as the leaf is considered active and should be dropped - /// when the leaf is deactivated. + /// NOTE: Each span should only be kept active as long as the leaf is considered active and + /// should be dropped when the leaf is deactivated. pub span: Arc, } diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index 8f2e3375b6f1..d5dcea7a2565 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -16,8 +16,8 @@ //! Message types for the overseer and subsystems. //! -//! These messages are intended to define the protocol by which different subsystems communicate with each -//! other and signals that they receive from an overseer to coordinate their work. +//! These messages are intended to define the protocol by which different subsystems communicate +//! 
with each other and signals that they receive from an overseer to coordinate their work. //! This is intended for use with the `polkadot-overseer` crate. //! //! Subsystems' APIs are defined separately from their implementation, leading to easier mocking. @@ -62,12 +62,13 @@ pub enum CandidateBackingMessage { /// Requests a set of backable candidates that could be backed in a child of the given /// relay-parent, referenced by its hash. GetBackedCandidates(Hash, Vec, oneshot::Sender>), - /// Note that the Candidate Backing subsystem should second the given candidate in the context of the - /// given relay-parent (ref. by hash). This candidate must be validated. + /// Note that the Candidate Backing subsystem should second the given candidate in the context + /// of the given relay-parent (ref. by hash). This candidate must be validated. Second(Hash, CandidateReceipt, PoV), - /// Note a validator's statement about a particular candidate. Disagreements about validity must be escalated - /// to a broader check by the Disputes Subsystem, though that escalation is deferred until the approval voting - /// stage to guarantee availability. Agreements are simply tallied until a quorum is reached. + /// Note a validator's statement about a particular candidate. Disagreements about validity + /// must be escalated to a broader check by the Disputes Subsystem, though that escalation is + /// deferred until the approval voting stage to guarantee availability. Agreements are simply + /// tallied until a quorum is reached. Statement(Hash, SignedFullStatement), } @@ -143,8 +144,8 @@ pub enum CandidateValidationMessage { /// Try to compile the given validation code and send back /// the outcome. /// - /// The validation code is specified by the hash and will be queried from the runtime API at the - /// given relay-parent. + /// The validation code is specified by the hash and will be queried from the runtime API at + /// the given relay-parent. 
PreCheck( // Relay-parent Hash, @@ -157,16 +158,16 @@ pub enum CandidateValidationMessage { #[derive(Debug, derive_more::From)] pub enum CollatorProtocolMessage { /// Signal to the collator protocol that it should connect to validators with the expectation - /// of collating on the given para. This is only expected to be called once, early on, if at all, - /// and only by the Collation Generation subsystem. As such, it will overwrite the value of - /// the previous signal. + /// of collating on the given para. This is only expected to be called once, early on, if at + /// all, and only by the Collation Generation subsystem. As such, it will overwrite the value + /// of the previous signal. /// /// This should be sent before any `DistributeCollation` message. CollateOn(ParaId), /// Provide a collation to distribute to validators with an optional result sender. /// - /// The result sender should be informed when at least one parachain validator seconded the collation. It is also - /// completely okay to just drop the sender. + /// The result sender should be informed when at least one parachain validator seconded the + /// collation. It is also completely okay to just drop the sender. DistributeCollation(CandidateReceipt, PoV, Option>), /// Report a collator as having provided an invalid collation. This should lead to disconnect /// and blacklist of the collator. @@ -174,7 +175,8 @@ pub enum CollatorProtocolMessage { /// Get a network bridge update. #[from] NetworkBridgeUpdate(NetworkBridgeEvent), - /// We recommended a particular candidate to be seconded, but it was invalid; penalize the collator. + /// We recommended a particular candidate to be seconded, but it was invalid; penalize the + /// collator. /// /// The hash is the relay parent. Invalid(Hash, CandidateReceipt), @@ -198,14 +200,15 @@ impl Default for CollatorProtocolMessage { pub enum DisputeCoordinatorMessage { /// Import statements by validators about a candidate. 
/// - /// The subsystem will silently discard ancient statements or sets of only dispute-specific statements for - /// candidates that are previously unknown to the subsystem. The former is simply because ancient - /// data is not relevant and the latter is as a DoS prevention mechanism. Both backing and approval - /// statements already undergo anti-DoS procedures in their respective subsystems, but statements - /// cast specifically for disputes are not necessarily relevant to any candidate the system is - /// already aware of and thus present a DoS vector. Our expectation is that nodes will notify each - /// other of disputes over the network by providing (at least) 2 conflicting statements, of which one is either - /// a backing or validation statement. + /// The subsystem will silently discard ancient statements or sets of only dispute-specific + /// statements for candidates that are previously unknown to the subsystem. The former is + /// simply because ancient data is not relevant and the latter is as a DoS prevention + /// mechanism. Both backing and approval statements already undergo anti-DoS procedures in + /// their respective subsystems, but statements cast specifically for disputes are not + /// necessarily relevant to any candidate the system is already aware of and thus present a DoS + /// vector. Our expectation is that nodes will notify each other of disputes over the network + /// by providing (at least) 2 conflicting statements, of which one is either a backing or + /// validation statement. /// /// This does not do any checking of the message signature. 
ImportStatements { @@ -222,16 +225,16 @@ pub enum DisputeCoordinatorMessage { /// /// This is: /// - we discarded the votes because - /// - they were ancient or otherwise invalid (result: `InvalidImport`) - /// - or we were not able to recover availability for an unknown candidate (result: + /// - they were ancient or otherwise invalid (result: `InvalidImport`) + /// - or we were not able to recover availability for an unknown candidate (result: /// `InvalidImport`) - /// - or were known already (in that case the result will still be `ValidImport`) + /// - or were known already (in that case the result will still be `ValidImport`) /// - or we recorded them because (`ValidImport`) - /// - we cast our own vote already on that dispute - /// - or we have approval votes on that candidate - /// - or other explicit votes on that candidate already recorded - /// - or recovered availability for the candidate - /// - or the imported statements are backing/approval votes, which are always accepted. + /// - we cast our own vote already on that dispute + /// - or we have approval votes on that candidate + /// - or other explicit votes on that candidate already recorded + /// - or recovered availability for the candidate + /// - or the imported statements are backing/approval votes, which are always accepted. pending_confirmation: Option>, }, /// Fetch a list of all recent disputes the coordinator is aware of. @@ -246,15 +249,17 @@ pub enum DisputeCoordinatorMessage { Vec<(SessionIndex, CandidateHash)>, oneshot::Sender>, ), - /// Sign and issue local dispute votes. A value of `true` indicates validity, and `false` invalidity. + /// Sign and issue local dispute votes. A value of `true` indicates validity, and `false` + /// invalidity. IssueLocalStatement(SessionIndex, CandidateHash, CandidateReceipt, bool), /// Determine the highest undisputed block within the given chain, based on where candidates /// were included. 
If even the base block should not be finalized due to a dispute, /// then `None` should be returned on the channel. /// - /// The block descriptions begin counting upwards from the block after the given `base_number`. The `base_number` - /// is typically the number of the last finalized block but may be slightly higher. This block - /// is inevitably going to be finalized so it is not accounted for by this function. + /// The block descriptions begin counting upwards from the block after the given `base_number`. + /// The `base_number` is typically the number of the last finalized block but may be slightly + /// higher. This block is inevitably going to be finalized so it is not accounted for by this + /// function. DetermineUndisputedChain { /// The lowest possible block to vote on. base: (BlockNumber, Hash), @@ -369,8 +374,8 @@ pub enum NetworkBridgeTxMessage { /// authority discovery has failed to resolve. failed: oneshot::Sender, }, - /// Alternative to `ConnectToValidators` in case you already know the `Multiaddrs` you want to be - /// connected to. + /// Alternative to `ConnectToValidators` in case you already know the `Multiaddrs` you want to + /// be connected to. ConnectToResolvedValidators { /// Each entry corresponds to the addresses of an already resolved validator. validator_addrs: Vec>, @@ -576,8 +581,8 @@ pub enum RuntimeApiRequest { OccupiedCoreAssumption, RuntimeApiSender>, ), - /// Get the persisted validation data for a particular para along with the current validation code - /// hash, matching the data hash against an expected one. + /// Get the persisted validation data for a particular para along with the current validation + /// code hash, matching the data hash against an expected one. AssumedValidationData( ParaId, Hash, @@ -595,10 +600,11 @@ pub enum RuntimeApiRequest { /// will inform on how the validation data should be computed if the para currently /// occupies a core. 
ValidationCode(ParaId, OccupiedCoreAssumption, RuntimeApiSender>), - /// Get validation code by its hash, either past, current or future code can be returned, as long as state is still - /// available. + /// Get validation code by its hash, either past, current or future code can be returned, as + /// long as state is still available. ValidationCodeByHash(ValidationCodeHash, RuntimeApiSender>), - /// Get a the candidate pending availability for a particular parachain by parachain / core index + /// Get a the candidate pending availability for a particular parachain by parachain / core + /// index CandidatePendingAvailability(ParaId, RuntimeApiSender>), /// Get all events concerning candidates (backing, inclusion, time-out) in the parent of /// the block in whose state this request is executed. @@ -623,8 +629,9 @@ pub enum RuntimeApiRequest { SubmitPvfCheckStatement(PvfCheckStatement, ValidatorSignature, RuntimeApiSender<()>), /// Returns code hashes of PVFs that require pre-checking by validators in the active set. PvfsRequirePrecheck(RuntimeApiSender>), - /// Get the validation code used by the specified para, taking the given `OccupiedCoreAssumption`, which - /// will inform on how the validation data should be computed if the para currently occupies a core. + /// Get the validation code used by the specified para, taking the given + /// `OccupiedCoreAssumption`, which will inform on how the validation data should be computed + /// if the para currently occupies a core. ValidationCodeHash( ParaId, OccupiedCoreAssumption, @@ -686,13 +693,15 @@ pub enum StatementDistributionMessage { NetworkBridgeUpdate(NetworkBridgeEvent), } -/// This data becomes intrinsics or extrinsics which should be included in a future relay chain block. +/// This data becomes intrinsics or extrinsics which should be included in a future relay chain +/// block. // It needs to be cloneable because multiple potential block authors can request copies. 
#[derive(Debug, Clone)] pub enum ProvisionableData { /// This bitfield indicates the availability of various candidate blocks. Bitfield(Hash, SignedAvailabilityBitfield), - /// The Candidate Backing subsystem believes that this candidate is valid, pending availability. + /// The Candidate Backing subsystem believes that this candidate is valid, pending + /// availability. BackedCandidate(CandidateReceipt), /// Misbehavior reports are self-contained proofs of validator misbehavior. MisbehaviorReport(Hash, ValidatorIndex, Misbehavior), @@ -716,11 +725,11 @@ pub struct ProvisionerInherentData { /// In all cases, the Hash is that of the relay parent. #[derive(Debug)] pub enum ProvisionerMessage { - /// This message allows external subsystems to request the set of bitfields and backed candidates - /// associated with a particular potential block hash. + /// This message allows external subsystems to request the set of bitfields and backed + /// candidates associated with a particular potential block hash. /// - /// This is expected to be used by a proposer, to inject that information into the `InherentData` - /// where it can be assembled into the `ParaInherent`. + /// This is expected to be used by a proposer, to inject that information into the + /// `InherentData` where it can be assembled into the `ParaInherent`. RequestInherentData(Hash, oneshot::Sender), /// This data should become part of a relay chain block ProvisionableData(Hash, ProvisionableData), diff --git a/node/subsystem-types/src/runtime_client.rs b/node/subsystem-types/src/runtime_client.rs index 196b928ad62b..4d8eddde73e9 100644 --- a/node/subsystem-types/src/runtime_client.rs +++ b/node/subsystem-types/src/runtime_client.rs @@ -138,7 +138,7 @@ pub trait RuntimeApiSubsystemClient { async fn on_chain_votes(&self, at: Hash) -> Result>, ApiError>; - /***** Added in v2 *****/ + /***** Added in v2 **** */ /// Get the session info for the given session, if stored. 
/// @@ -164,7 +164,8 @@ pub trait RuntimeApiSubsystemClient { /// NOTE: This function is only available since parachain host version 2. async fn pvfs_require_precheck(&self, at: Hash) -> Result, ApiError>; - /// Fetch the hash of the validation code used by a para, making the given `OccupiedCoreAssumption`. + /// Fetch the hash of the validation code used by a para, making the given + /// `OccupiedCoreAssumption`. /// /// NOTE: This function is only available since parachain host version 2. async fn validation_code_hash( @@ -174,7 +175,7 @@ pub trait RuntimeApiSubsystemClient { assumption: OccupiedCoreAssumption, ) -> Result, ApiError>; - /***** Added in v3 *****/ + /***** Added in v3 **** */ /// Returns all onchain disputes. /// This is a staging method! Do not use on production runtimes! diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs index de869bd91f12..e0b81608ff2f 100644 --- a/node/subsystem-util/src/lib.rs +++ b/node/subsystem-util/src/lib.rs @@ -20,7 +20,8 @@ //! or determining what their validator ID is. These common interests are factored into //! this module. //! -//! This crate also reexports Prometheus metric types which are expected to be implemented by subsystems. +//! This crate also reexports Prometheus metric types which are expected to be implemented by +//! subsystems. #![warn(missing_docs)] @@ -60,7 +61,8 @@ pub use polkadot_node_network_protocol::MIN_GOSSIP_PEERS; pub use determine_new_blocks::determine_new_blocks; -/// These reexports are required so that external crates can use the `delegated_subsystem` macro properly. +/// These reexports are required so that external crates can use the `delegated_subsystem` macro +/// properly. 
pub mod reexports { pub use polkadot_overseer::gen::{SpawnedSubsystem, Spawner, Subsystem, SubsystemContext}; } @@ -367,7 +369,8 @@ pub struct Validator { } impl Validator { - /// Get a struct representing this node's validator if this node is in fact a validator in the context of the given block. + /// Get a struct representing this node's validator if this node is in fact a validator in the + /// context of the given block. pub async fn new(parent: Hash, keystore: KeystorePtr, sender: &mut S) -> Result where S: SubsystemSender, diff --git a/node/subsystem-util/src/nesting_sender.rs b/node/subsystem-util/src/nesting_sender.rs index 4417efbefb04..5d80dbf78101 100644 --- a/node/subsystem-util/src/nesting_sender.rs +++ b/node/subsystem-util/src/nesting_sender.rs @@ -33,14 +33,14 @@ //! //! This module helps with this in part. It does not break the multithreaded by default approach, //! but it breaks the `spawn everything` approach. So once you `spawn` you will still be -//! multithreaded by default, despite that for most tasks we spawn (which just wait for network or some -//! message to arrive), that is very much pointless and needless overhead. You will just spawn less in -//! the first place. +//! multithreaded by default, despite that for most tasks we spawn (which just wait for network or +//! some message to arrive), that is very much pointless and needless overhead. You will just spawn +//! less in the first place. //! //! By default your code is single threaded, except when actually needed: -//! - need to wait for long running synchronous IO (a threaded runtime is actually useful here) -//! - need to wait for some async event (message to arrive) -//! - need to do some hefty CPU bound processing (a thread is required here as well) +//! - need to wait for long running synchronous IO (a threaded runtime is actually useful here) +//! - need to wait for some async event (message to arrive) +//! 
- need to do some hefty CPU bound processing (a thread is required here as well) //! //! and it is not acceptable to block the main task for waiting for the result, because we actually //! really have other things to do or at least need to stay responsive just in case. @@ -48,7 +48,8 @@ //! With the types and traits in this module you can achieve exactly that: You write modules which //! just execute logic and can call into the functions of other modules - yes we are calling normal //! functions. For the case a module you are calling into requires an occasional background task, -//! you provide it with a `NestingSender` that it can pass to any spawned tasks. +//! you provide it with a `NestingSender` that it can pass to any spawned +//! tasks. //! //! This way you don't have to spawn a task for each module just for it to be able to handle //! asynchronous events. The module relies on the using/enclosing code/module to forward it any @@ -65,9 +66,9 @@ //! Because the wrapping is optional and transparent to the lower modules, each module can also be //! used at the top directly without any wrapping, e.g. for standalone use or for testing purposes. //! -//! Checkout the documentation of [`NestingSender`][nesting_sender::NestingSender] below for a basic usage example. For a real -//! world usage I would like to point you to the dispute-distribution subsystem which makes use of -//! this architecture. +//! Checkout the documentation of [`NestingSender`][nesting_sender::NestingSender] below for a basic +//! usage example. For a real world usage I would like to point you to the dispute-distribution +//! subsystem which makes use of this architecture. //! //! ## Limitations //! 
diff --git a/node/subsystem-util/src/reputation.rs b/node/subsystem-util/src/reputation.rs index 09c00bb4688a..89e3eb64df9b 100644 --- a/node/subsystem-util/src/reputation.rs +++ b/node/subsystem-util/src/reputation.rs @@ -48,7 +48,8 @@ impl ReputationAggregator { /// /// * `send_immediately_if` - A function, takes `UnifiedReputationChange`, /// results shows if we need to send the changes right away. - /// By default, it is used for sending `UnifiedReputationChange::Malicious` changes immediately and for testing. + /// By default, it is used for sending `UnifiedReputationChange::Malicious` changes immediately + /// and for testing. pub fn new(send_immediately_if: fn(UnifiedReputationChange) -> bool) -> Self { Self { by_peer: Default::default(), send_immediately_if } } diff --git a/node/test/client/src/block_builder.rs b/node/test/client/src/block_builder.rs index 88160e782a70..0987cef55c1f 100644 --- a/node/test/client/src/block_builder.rs +++ b/node/test/client/src/block_builder.rs @@ -32,15 +32,16 @@ use sp_state_machine::BasicExternalities; pub trait InitPolkadotBlockBuilder { /// Init a Polkadot specific block builder that works for the test runtime. /// - /// This will automatically create and push the inherents for you to make the block valid for the test runtime. + /// This will automatically create and push the inherents for you to make the block valid for + /// the test runtime. fn init_polkadot_block_builder( &self, ) -> sc_block_builder::BlockBuilder; /// Init a Polkadot specific block builder at a specific block that works for the test runtime. /// - /// Same as [`InitPolkadotBlockBuilder::init_polkadot_block_builder`] besides that it takes a [`BlockId`] to say - /// which should be the parent block of the block that is being build. + /// Same as [`InitPolkadotBlockBuilder::init_polkadot_block_builder`] besides that it takes a + /// [`BlockId`] to say which should be the parent block of the block that is being build. 
fn init_polkadot_block_builder_at( &self, hash: ::Hash, @@ -60,7 +61,8 @@ impl InitPolkadotBlockBuilder for Client { let last_timestamp = self.runtime_api().get_last_timestamp(hash).expect("Get last timestamp"); - // `MinimumPeriod` is a storage parameter type that requires externalities to access the value. + // `MinimumPeriod` is a storage parameter type that requires externalities to access the + // value. let minimum_period = BasicExternalities::new_empty() .execute_with(|| polkadot_test_runtime::MinimumPeriod::get()); @@ -73,7 +75,8 @@ impl InitPolkadotBlockBuilder for Client { last_timestamp + minimum_period }; - // `SlotDuration` is a storage parameter type that requires externalities to access the value. + // `SlotDuration` is a storage parameter type that requires externalities to access the + // value. let slot_duration = BasicExternalities::new_empty() .execute_with(|| polkadot_test_runtime::SlotDuration::get()); @@ -130,9 +133,9 @@ impl InitPolkadotBlockBuilder for Client { pub trait BlockBuilderExt { /// Push a Polkadot test runtime specific extrinsic to the block. /// - /// This will internally use the [`BlockBuilder::push`] method, but this method expects a opaque extrinsic. So, - /// we provide this wrapper which converts a test runtime specific extrinsic to a opaque extrinsic and pushes it to - /// the block. + /// This will internally use the [`BlockBuilder::push`] method, but this method expects a opaque + /// extrinsic. So, we provide this wrapper which converts a test runtime specific extrinsic to a + /// opaque extrinsic and pushes it to the block. /// /// Returns the result of the application of the extrinsic. fn push_polkadot_extrinsic( diff --git a/node/test/service/src/lib.rs b/node/test/service/src/lib.rs index a2c1b1941003..ed25d28d2925 100644 --- a/node/test/service/src/lib.rs +++ b/node/test/service/src/lib.rs @@ -257,7 +257,8 @@ pub struct PolkadotTestNode { pub client: Arc, /// A handle to Overseer. 
pub overseer_handle: Handle, - /// The `MultiaddrWithPeerId` to this node. This is useful if you want to pass it as "boot node" to other nodes. + /// The `MultiaddrWithPeerId` to this node. This is useful if you want to pass it as "boot + /// node" to other nodes. pub addr: MultiaddrWithPeerId, /// `RPCHandlers` to make RPC queries. pub rpc_handlers: RpcHandlers, @@ -312,14 +313,15 @@ impl PolkadotTestNode { self.send_sudo(call, Sr25519Keyring::Alice, 1).await } - /// Wait for `count` blocks to be imported in the node and then exit. This function will not return if no blocks - /// are ever created, thus you should restrict the maximum amount of time of the test execution. + /// Wait for `count` blocks to be imported in the node and then exit. This function will not + /// return if no blocks are ever created, thus you should restrict the maximum amount of time of + /// the test execution. pub fn wait_for_blocks(&self, count: usize) -> impl Future { self.client.wait_for_blocks(count) } - /// Wait for `count` blocks to be finalized and then exit. Similarly with `wait_for_blocks` this function will - /// not return if no block are ever finalized. + /// Wait for `count` blocks to be finalized and then exit. Similarly with `wait_for_blocks` this + /// function will not return if no block are ever finalized. pub async fn wait_for_finalized_blocks(&self, count: usize) { let mut import_notification_stream = self.client.finality_notification_stream(); let mut blocks = HashSet::new(); diff --git a/parachain/src/primitives.rs b/parachain/src/primitives.rs index 18da89aa97a1..55577618c469 100644 --- a/parachain/src/primitives.rs +++ b/parachain/src/primitives.rs @@ -287,13 +287,13 @@ impl IsSystem for Sibling { } } -/// A type that uniquely identifies an HRMP channel. An HRMP channel is established between two paras. -/// In text, we use the notation `(A, B)` to specify a channel between A and B. 
The channels are -/// unidirectional, meaning that `(A, B)` and `(B, A)` refer to different channels. The convention is -/// that we use the first item tuple for the sender and the second for the recipient. Only one channel -/// is allowed between two participants in one direction, i.e. there cannot be 2 different channels -/// identified by `(A, B)`. A channel with the same para id in sender and recipient is invalid. That -/// is, however, not enforced. +/// A type that uniquely identifies an HRMP channel. An HRMP channel is established between two +/// paras. In text, we use the notation `(A, B)` to specify a channel between A and B. The channels +/// are unidirectional, meaning that `(A, B)` and `(B, A)` refer to different channels. The +/// convention is that we use the first item tuple for the sender and the second for the recipient. +/// Only one channel is allowed between two participants in one direction, i.e. there cannot be 2 +/// different channels identified by `(A, B)`. A channel with the same para id in sender and +/// recipient is invalid. That is, however, not enforced. #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Encode, Decode, RuntimeDebug, TypeInfo)] #[cfg_attr(feature = "std", derive(Hash))] pub struct HrmpChannelId { @@ -414,6 +414,7 @@ pub struct ValidationResult { /// /// It is expected that the Parachain processes them from first to last. pub processed_downward_messages: u32, - /// The mark which specifies the block number up to which all inbound HRMP messages are processed. + /// The mark which specifies the block number up to which all inbound HRMP messages are + /// processed. 
pub hrmp_watermark: RelayChainBlockNumber, } diff --git a/parachain/test-parachains/adder/collator/src/lib.rs b/parachain/test-parachains/adder/collator/src/lib.rs index 02a4598f9e47..1ac561dda2ba 100644 --- a/parachain/test-parachains/adder/collator/src/lib.rs +++ b/parachain/test-parachains/adder/collator/src/lib.rs @@ -147,7 +147,8 @@ impl Collator { /// Create the collation function. /// - /// This collation function can be plugged into the overseer to generate collations for the adder parachain. + /// This collation function can be plugged into the overseer to generate collations for the + /// adder parachain. pub fn create_collation_function( &self, spawner: impl SpawnNamed + Clone + 'static, @@ -228,8 +229,9 @@ impl Collator { /// Wait until `seconded` collations of this collator are seconded by a parachain validator. /// - /// The internal counter isn't de-duplicating the collations when counting the number of seconded collations. This - /// means when one collation is seconded by X validators, we record X seconded messages. + /// The internal counter isn't de-duplicating the collations when counting the number of + /// seconded collations. This means when one collation is seconded by X validators, we record X + /// seconded messages. pub async fn wait_for_seconded_collations(&self, seconded: u32) { let seconded_collations = self.seconded_collations.clone(); loop { diff --git a/parachain/test-parachains/adder/collator/tests/integration.rs b/parachain/test-parachains/adder/collator/tests/integration.rs index 9ab1c0c337a6..b891b29db59c 100644 --- a/parachain/test-parachains/adder/collator/tests/integration.rs +++ b/parachain/test-parachains/adder/collator/tests/integration.rs @@ -19,7 +19,8 @@ const PUPPET_EXE: &str = env!("CARGO_BIN_EXE_adder_collator_puppet_worker"); -// If this test is failing, make sure to run all tests with the `real-overseer` feature being enabled. 
+// If this test is failing, make sure to run all tests with the `real-overseer` feature being +// enabled. #[substrate_test_utils::test(flavor = "multi_thread")] async fn collating_using_adder_collator() { diff --git a/parachain/test-parachains/undying/collator/src/lib.rs b/parachain/test-parachains/undying/collator/src/lib.rs index 838590fa16f5..cc0f592dc253 100644 --- a/parachain/test-parachains/undying/collator/src/lib.rs +++ b/parachain/test-parachains/undying/collator/src/lib.rs @@ -221,7 +221,8 @@ impl Collator { /// Create the collation function. /// - /// This collation function can be plugged into the overseer to generate collations for the undying parachain. + /// This collation function can be plugged into the overseer to generate collations for the + /// undying parachain. pub fn create_collation_function( &self, spawner: impl SpawnNamed + Clone + 'static, @@ -309,8 +310,9 @@ impl Collator { /// Wait until `seconded` collations of this collator are seconded by a parachain validator. /// - /// The internal counter isn't de-duplicating the collations when counting the number of seconded collations. This - /// means when one collation is seconded by X validators, we record X seconded messages. + /// The internal counter isn't de-duplicating the collations when counting the number of + /// seconded collations. This means when one collation is seconded by X validators, we record X + /// seconded messages. 
pub async fn wait_for_seconded_collations(&self, seconded: u32) { let seconded_collations = self.seconded_collations.clone(); loop { diff --git a/parachain/test-parachains/undying/collator/tests/integration.rs b/parachain/test-parachains/undying/collator/tests/integration.rs index 8ca6eec9aa62..21d174fb06c7 100644 --- a/parachain/test-parachains/undying/collator/tests/integration.rs +++ b/parachain/test-parachains/undying/collator/tests/integration.rs @@ -19,7 +19,8 @@ const PUPPET_EXE: &str = env!("CARGO_BIN_EXE_undying_collator_puppet_worker"); -// If this test is failing, make sure to run all tests with the `real-overseer` feature being enabled. +// If this test is failing, make sure to run all tests with the `real-overseer` feature being +// enabled. #[substrate_test_utils::test(flavor = "multi_thread")] async fn collating_using_undying_collator() { use polkadot_primitives::Id as ParaId; diff --git a/primitives/src/runtime_api.rs b/primitives/src/runtime_api.rs index ec05beea9d5f..c3a150a642e0 100644 --- a/primitives/src/runtime_api.rs +++ b/primitives/src/runtime_api.rs @@ -30,10 +30,9 @@ //! The versioning is achieved with the `api_version` attribute. It can be //! placed on: //! * trait declaration - represents the base version of the API. -//! * method declaration (inside a trait declaration) - represents a versioned -//! method, which is not available in the base version. -//! * trait implementation - represents which version of the API is being -//! implemented. +//! * method declaration (inside a trait declaration) - represents a versioned method, which is not +//! available in the base version. +//! * trait implementation - represents which version of the API is being implemented. //! //! Let's see a quick example: //! @@ -90,14 +89,14 @@ //! # How versioned methods are used for `ParachainHost` //! //! Let's introduce two types of `ParachainHost` API implementation: -//! * stable - used on stable production networks like Polkadot and Kusama. 
There is only one -//! stable API at a single point in time. +//! * stable - used on stable production networks like Polkadot and Kusama. There is only one stable +//! API at a single point in time. //! * staging - methods that are ready for production, but will be released on Rococo first. We can //! batch together multiple changes and then release all of them to production, by making staging //! production (bump base version). We can not change or remove any method in staging after a -//! release, as this would break Rococo. It should be ok to keep adding methods to staging -//! across several releases. For experimental methods, you have to keep them on a separate branch -//! until ready. +//! release, as this would break Rococo. It should be ok to keep adding methods to staging across +//! several releases. For experimental methods, you have to keep them on a separate branch until +//! ready. //! //! The stable version of `ParachainHost` is indicated by the base version of the API. Any staging //! method must use `api_version` attribute so that it is assigned to a specific version of a @@ -111,8 +110,8 @@ //! ``` //! indicates a function from the stable `v2` API. //! -//! All staging API functions should use primitives from `vstaging`. They should be clearly separated -//! from the stable primitives. +//! All staging API functions should use primitives from `vstaging`. They should be clearly +//! separated from the stable primitives. use crate::{ vstaging, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, diff --git a/primitives/src/v5/metrics.rs b/primitives/src/v5/metrics.rs index f947c7392dcb..97f7678e4373 100644 --- a/primitives/src/v5/metrics.rs +++ b/primitives/src/v5/metrics.rs @@ -164,8 +164,8 @@ pub mod metric_definitions { }; /// Counts the number of `imported`, `current` and `concluded_invalid` dispute statements sets - /// processed in `process_inherent_data`. 
The `current` label refers to the disputes statement sets of - /// the current session. + /// processed in `process_inherent_data`. The `current` label refers to the disputes statement + /// sets of the current session. pub const PARACHAIN_INHERENT_DATA_DISPUTE_SETS_PROCESSED: CounterVecDefinition = CounterVecDefinition { name: "polkadot_parachain_inherent_data_dispute_sets_processed", @@ -174,7 +174,8 @@ pub mod metric_definitions { labels: &["category"], }; - /// Counts the number of `valid` and `invalid` bitfields signature checked in `process_inherent_data`. + /// Counts the number of `valid` and `invalid` bitfields signature checked in + /// `process_inherent_data`. pub const PARACHAIN_CREATE_INHERENT_BITFIELDS_SIGNATURE_CHECKS: CounterVecDefinition = CounterVecDefinition { name: "polkadot_parachain_create_inherent_bitfields_signature_checks", @@ -183,7 +184,8 @@ pub mod metric_definitions { labels: &["validity"], }; - /// Measures how much time does it take to verify a single validator signature of a dispute statement + /// Measures how much time does it take to verify a single validator signature of a dispute + /// statement pub const PARACHAIN_VERIFY_DISPUTE_SIGNATURE: HistogramDefinition = HistogramDefinition { name: "polkadot_parachain_verify_dispute_signature", diff --git a/primitives/src/v5/mod.rs b/primitives/src/v5/mod.rs index 3498c0762d4c..bdd10e623190 100644 --- a/primitives/src/v5/mod.rs +++ b/primitives/src/v5/mod.rs @@ -103,7 +103,8 @@ pub trait TypeIndex { fn type_index(&self) -> usize; } -/// Index of the validator is used as a lightweight replacement of the `ValidatorId` when appropriate. +/// Index of the validator is used as a lightweight replacement of the `ValidatorId` when +/// appropriate. 
#[derive(Eq, Ord, PartialEq, PartialOrd, Copy, Clone, Encode, Decode, TypeInfo, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash))] pub struct ValidatorIndex(pub u32); @@ -589,25 +590,27 @@ impl Ord for CommittedCandidateReceipt { } } -/// The validation data provides information about how to create the inputs for validation of a candidate. -/// This information is derived from the chain state and will vary from para to para, although some -/// fields may be the same for every para. +/// The validation data provides information about how to create the inputs for validation of a +/// candidate. This information is derived from the chain state and will vary from para to para, +/// although some fields may be the same for every para. /// -/// Since this data is used to form inputs to the validation function, it needs to be persisted by the -/// availability system to avoid dependence on availability of the relay-chain state. +/// Since this data is used to form inputs to the validation function, it needs to be persisted by +/// the availability system to avoid dependence on availability of the relay-chain state. /// -/// Furthermore, the validation data acts as a way to authorize the additional data the collator needs -/// to pass to the validation function. For example, the validation function can check whether the incoming -/// messages (e.g. downward messages) were actually sent by using the data provided in the validation data -/// using so called MQC heads. +/// Furthermore, the validation data acts as a way to authorize the additional data the collator +/// needs to pass to the validation function. For example, the validation function can check whether +/// the incoming messages (e.g. downward messages) were actually sent by using the data provided in +/// the validation data using so called MQC heads. 
/// -/// Since the commitments of the validation function are checked by the relay-chain, secondary checkers -/// can rely on the invariant that the relay-chain only includes para-blocks for which these checks have -/// already been done. As such, there is no need for the validation data used to inform validators and -/// collators about the checks the relay-chain will perform to be persisted by the availability system. +/// Since the commitments of the validation function are checked by the relay-chain, secondary +/// checkers can rely on the invariant that the relay-chain only includes para-blocks for which +/// these checks have already been done. As such, there is no need for the validation data used to +/// inform validators and collators about the checks the relay-chain will perform to be persisted by +/// the availability system. /// -/// The `PersistedValidationData` should be relatively lightweight primarily because it is constructed -/// during inclusion for each candidate and therefore lies on the critical path of inclusion. +/// The `PersistedValidationData` should be relatively lightweight primarily because it is +/// constructed during inclusion for each candidate and therefore lies on the critical path of +/// inclusion. #[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Default))] pub struct PersistedValidationData { @@ -642,7 +645,8 @@ pub struct CandidateCommitments { pub head_data: HeadData, /// The number of messages processed from the DMQ. pub processed_downward_messages: u32, - /// The mark which specifies the block number up to which all inbound HRMP messages are processed. + /// The mark which specifies the block number up to which all inbound HRMP messages are + /// processed. pub hrmp_watermark: N, } @@ -677,7 +681,8 @@ pub type UncheckedSignedAvailabilityBitfield = UncheckedSigned; -/// A set of unchecked signed availability bitfields. Should be sorted by validator index, ascending. 
+/// A set of unchecked signed availability bitfields. Should be sorted by validator index, +/// ascending. pub type UncheckedSignedAvailabilityBitfields = Vec; /// A backed (or backable, depending on context) candidate. @@ -975,8 +980,9 @@ pub enum CoreState { /// variant. #[codec(index = 1)] Scheduled(ScheduledCore), - /// The core is currently free and there is nothing scheduled. This can be the case for parathread - /// cores when there are no parathread blocks queued. Parachain cores will never be left idle. + /// The core is currently free and there is nothing scheduled. This can be the case for + /// parathread cores when there are no parathread blocks queued. Parachain cores will never be + /// left idle. #[codec(index = 2)] Free, } @@ -1079,8 +1085,8 @@ impl From for u8 { } } -/// Abridged version of `HostConfiguration` (from the `Configuration` parachains host runtime module) -/// meant to be used by a parachain or PDK such as cumulus. +/// Abridged version of `HostConfiguration` (from the `Configuration` parachains host runtime +/// module) meant to be used by a parachain or PDK such as cumulus. #[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] #[cfg_attr(feature = "std", derive(PartialEq))] pub struct AbridgedHostConfiguration { @@ -1156,17 +1162,18 @@ pub enum UpgradeRestriction { #[derive(Copy, Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] pub enum UpgradeGoAhead { /// Abort the upgrade process. There is something wrong with the validation code previously - /// submitted by the parachain. This variant can also be used to prevent upgrades by the governance - /// should an emergency emerge. + /// submitted by the parachain. This variant can also be used to prevent upgrades by the + /// governance should an emergency emerge. /// /// The expected reaction on this variant is that the parachain will admit this message and /// remove all the data about the pending upgrade. 
Depending on the nature of the problem (to - /// be examined offchain for now), it can try to send another validation code or just retry later. + /// be examined offchain for now), it can try to send another validation code or just retry + /// later. #[codec(index = 0)] Abort, - /// Apply the pending code change. The parablock that is built on a relay-parent that is descendant - /// of the relay-parent where the parachain observed this signal must use the upgraded validation - /// code. + /// Apply the pending code change. The parablock that is built on a relay-parent that is + /// descendant of the relay-parent where the parachain observed this signal must use the + /// upgraded validation code. #[codec(index = 1)] GoAhead, } @@ -1646,7 +1653,7 @@ pub const fn supermajority_threshold(n: usize) -> usize { #[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] #[cfg_attr(feature = "std", derive(PartialEq))] pub struct SessionInfo { - /****** New in v2 *******/ + /****** New in v2 ****** */ /// All the validators actively participating in parachain consensus. /// Indices are into the broader validator set. pub active_validator_indices: Vec, @@ -1655,11 +1662,11 @@ pub struct SessionInfo { /// The amount of sessions to keep for disputes. pub dispute_period: SessionIndex, - /****** Old fields ******/ + /****** Old fields ***** */ /// Validators in canonical ordering. /// - /// NOTE: There might be more authorities in the current session, than `validators` participating - /// in parachain consensus. See + /// NOTE: There might be more authorities in the current session, than `validators` + /// participating in parachain consensus. See /// [`max_validators`](https://github.com/paritytech/polkadot/blob/a52dca2be7840b23c19c153cf7e110b1e3e475f8/runtime/parachains/src/configuration.rs#L148). /// /// `SessionInfo::validators` will be limited to to `max_validators` when set. 
@@ -1667,8 +1674,8 @@ pub struct SessionInfo { /// Validators' authority discovery keys for the session in canonical ordering. /// /// NOTE: The first `validators.len()` entries will match the corresponding validators in - /// `validators`, afterwards any remaining authorities can be found. This is any authorities not - /// participating in parachain consensus - see + /// `validators`, afterwards any remaining authorities can be found. This is any authorities + /// not participating in parachain consensus - see /// [`max_validators`](https://github.com/paritytech/polkadot/blob/a52dca2be7840b23c19c153cf7e110b1e3e475f8/runtime/parachains/src/configuration.rs#L148) pub discovery_keys: Vec, /// The assignment keys for validators. @@ -1679,8 +1686,8 @@ pub struct SessionInfo { /// /// Therefore: /// ```ignore - /// assignment_keys.len() == validators.len() && validators.len() <= discovery_keys.len() - /// ``` + /// assignment_keys.len() == validators.len() && validators.len() <= discovery_keys.len() + /// ``` pub assignment_keys: Vec, /// Validators in shuffled ordering - these are the validator groups as produced /// by the `Scheduler` module for the session and are typically referred to by diff --git a/primitives/test-helpers/src/lib.rs b/primitives/test-helpers/src/lib.rs index ac7af5b5fa7d..a8fc0f7ccc26 100644 --- a/primitives/test-helpers/src/lib.rs +++ b/primitives/test-helpers/src/lib.rs @@ -17,7 +17,8 @@ #![forbid(unused_crate_dependencies)] #![forbid(unused_extern_crates)] -//! A set of primitive constructors, to aid in crafting meaningful testcase while reducing repetition. +//! A set of primitive constructors, to aid in crafting meaningful testcase while reducing +//! repetition. //! //! Note that `dummy_` prefixed values are meant to be fillers, that should not matter, and will //! contain randomness based data. 
diff --git a/runtime/common/slot_range_helper/src/lib.rs b/runtime/common/slot_range_helper/src/lib.rs index 626232032fbd..bbe5b61ae1f3 100644 --- a/runtime/common/slot_range_helper/src/lib.rs +++ b/runtime/common/slot_range_helper/src/lib.rs @@ -36,15 +36,15 @@ pub use sp_std::{ops::Add, result}; /// /// This will generate an enum `SlotRange` with the following properties: /// -/// * Enum variants will range from all consecutive combinations of inputs, i.e. -/// `ZeroZero`, `ZeroOne`, `ZeroTwo`, `ZeroThree`, `OneOne`, `OneTwo`, `OneThree`... +/// * Enum variants will range from all consecutive combinations of inputs, i.e. `ZeroZero`, +/// `ZeroOne`, `ZeroTwo`, `ZeroThree`, `OneOne`, `OneTwo`, `OneThree`... /// * A constant `LEASE_PERIODS_PER_SLOT` will count the number of lease periods. /// * A constant `SLOT_RANGE_COUNT` will count the total number of enum variants. /// * A function `as_pair` will return a tuple representation of the `SlotRange`. /// * A function `intersects` will tell you if two slot ranges intersect with one another. /// * A function `len` will tell you the length of occupying a `SlotRange`. -/// * A function `new_bounded` will generate a `SlotRange` from an input of the current -/// lease period, the starting lease period, and the final lease period. +/// * A function `new_bounded` will generate a `SlotRange` from an input of the current lease +/// period, the starting lease period, and the final lease period. #[macro_export] macro_rules! generate_slot_range{ // Entry point diff --git a/runtime/common/src/assigned_slots.rs b/runtime/common/src/assigned_slots.rs index 4424738c9835..b3c1381c9ec9 100644 --- a/runtime/common/src/assigned_slots.rs +++ b/runtime/common/src/assigned_slots.rs @@ -322,7 +322,8 @@ pub mod pallet { }, Err(err) => { // Treat failed lease creation as warning .. slot will be allocated a lease - // in a subsequent lease period by the `allocate_temporary_slot_leases` function. 
+ // in a subsequent lease period by the `allocate_temporary_slot_leases` + // function. log::warn!(target: "assigned_slots", "Failed to allocate a temp slot for para {:?} at period {:?}: {:?}", id, current_lease_period, err @@ -398,7 +399,8 @@ impl Pallet { /// total number of lease (lower first), and then when they last a turn (older ones first). /// If any remaining ex-aequo, we just take the para ID in ascending order as discriminator. /// - /// Assigned slots with a `period_begin` bigger than current lease period are not considered (yet). + /// Assigned slots with a `period_begin` bigger than current lease period are not considered + /// (yet). /// /// The function will call out to `Leaser::lease_out` to create the appropriate slot leases. fn allocate_temporary_slot_leases(lease_period_index: LeasePeriodOf) -> DispatchResult { @@ -525,7 +527,8 @@ impl Pallet { /// Handles start of a lease period. fn manage_lease_period_start(lease_period_index: LeasePeriodOf) -> Weight { - // Note: leases that have ended in previous lease period, should have been cleaned in slots pallet. + // Note: leases that have ended in previous lease period, should have been cleaned in slots + // pallet. if let Err(err) = Self::allocate_temporary_slot_leases(lease_period_index) { log::error!(target: "assigned_slots", "Allocating slots failed for lease period {:?}, with: {:?}", diff --git a/runtime/common/src/auctions.rs b/runtime/common/src/auctions.rs index 7ab12eec7998..901c9c27da28 100644 --- a/runtime/common/src/auctions.rs +++ b/runtime/common/src/auctions.rs @@ -138,8 +138,8 @@ pub mod pallet { Reserved { bidder: T::AccountId, extra_reserved: BalanceOf, total_amount: BalanceOf }, /// Funds were unreserved since bidder is no longer active. `[bidder, amount]` Unreserved { bidder: T::AccountId, amount: BalanceOf }, - /// Someone attempted to lease the same slot twice for a parachain. The amount is held in reserve - /// but no parachain slot has been leased. 
+ /// Someone attempted to lease the same slot twice for a parachain. The amount is held in + /// reserve but no parachain slot has been leased. ReserveConfiscated { para_id: ParaId, leaser: T::AccountId, amount: BalanceOf }, /// A new bid has been accepted as the current winner. BidAccepted { @@ -149,7 +149,8 @@ pub mod pallet { first_slot: LeasePeriodOf, last_slot: LeasePeriodOf, }, - /// The winning offset was chosen for an auction. This will map into the `Winning` storage map. + /// The winning offset was chosen for an auction. This will map into the `Winning` storage + /// map. WinningOffset { auction_index: AuctionIndex, block_number: BlockNumberFor }, } @@ -217,9 +218,9 @@ pub mod pallet { fn on_initialize(n: BlockNumberFor) -> Weight { let mut weight = T::DbWeight::get().reads(1); - // If the current auction was in its ending period last block, then ensure that the (sub-)range - // winner information is duplicated from the previous block in case no bids happened in the - // last block. + // If the current auction was in its ending period last block, then ensure that the + // (sub-)range winner information is duplicated from the previous block in case no bids + // happened in the last block. if let AuctionStatus::EndingPeriod(offset, _sub_sample) = Self::auction_status(n) { weight = weight.saturating_add(T::DbWeight::get().reads(1)); if !Winning::::contains_key(&offset) { @@ -555,8 +556,9 @@ impl Pallet { }); let res = Winning::::get(offset) .unwrap_or([Self::EMPTY; SlotRange::SLOT_RANGE_COUNT]); - // This `remove_all` statement should remove at most `EndingPeriod` / `SampleLength` items, - // which should be bounded and sensibly configured in the runtime. + // This `remove_all` statement should remove at most `EndingPeriod` / + // `SampleLength` items, which should be bounded and sensibly configured in the + // runtime. 
#[allow(deprecated)] Winning::::remove_all(None); AuctionInfo::::kill(); @@ -574,8 +576,8 @@ impl Pallet { auction_lease_period_index: LeasePeriodOf, winning_ranges: WinningData, ) { - // First, unreserve all amounts that were reserved for the bids. We will later re-reserve the - // amounts from the bidders that ended up being assigned the slot so there's no need to + // First, unreserve all amounts that were reserved for the bids. We will later re-reserve + // the amounts from the bidders that ended up being assigned the slot so there's no need to // special-case them here. for ((bidder, _), amount) in ReservedAmounts::::drain() { CurrencyOf::::unreserve(&bidder, amount); @@ -596,12 +598,12 @@ impl Pallet { Err(LeaseError::ReserveFailed) | Err(LeaseError::AlreadyEnded) | Err(LeaseError::NoLeasePeriod) => { - // Should never happen since we just unreserved this amount (and our offset is from the - // present period). But if it does, there's not much we can do. + // Should never happen since we just unreserved this amount (and our offset is + // from the present period). But if it does, there's not much we can do. }, Err(LeaseError::AlreadyLeased) => { - // The leaser attempted to get a second lease on the same para ID, possibly griefing us. Let's - // keep the amount reserved and let governance sort it out. + // The leaser attempted to get a second lease on the same para ID, possibly + // griefing us. Let's keep the amount reserved and let governance sort it out. if CurrencyOf::::reserve(&leaser, amount).is_ok() { Self::deposit_event(Event::::ReserveConfiscated { para_id: para, @@ -1123,11 +1125,11 @@ mod tests { Auctions::auction_status(System::block_number()), AuctionStatus::::EndingPeriod(2, 0) ); - // This will prevent the auction's winner from being decided in the next block, since the random - // seed was known before the final bids were made. 
+ // This will prevent the auction's winner from being decided in the next block, since + // the random seed was known before the final bids were made. set_last_random(H256::zero(), 8); - // Auction definitely ended now, but we don't know exactly when in the last 3 blocks yet since - // no randomness available yet. + // Auction definitely ended now, but we don't know exactly when in the last 3 blocks yet + // since no randomness available yet. run_to_block(9); // Auction has now ended... But auction winner still not yet decided, so no leases yet. assert_eq!( @@ -1136,8 +1138,8 @@ mod tests { ); assert_eq!(leases(), vec![]); - // Random seed now updated to a value known at block 9, when the auction ended. This means - // that the winner can now be chosen. + // Random seed now updated to a value known at block 9, when the auction ended. This + // means that the winner can now be chosen. set_last_random(H256::zero(), 9); run_to_block(10); // Auction ended and winner selected diff --git a/runtime/common/src/claims.rs b/runtime/common/src/claims.rs index 6a41a8f3f472..9cc06b2bede2 100644 --- a/runtime/common/src/claims.rs +++ b/runtime/common/src/claims.rs @@ -193,8 +193,8 @@ pub mod pallet { SignerHasNoClaim, /// Account ID sending transaction has no claim. SenderHasNoClaim, - /// There's not enough in the pot to pay out some unvested amount. Generally implies a logic - /// error. + /// There's not enough in the pot to pay out some unvested amount. Generally implies a + /// logic error. PotUnderflow, /// A needed statement was not included. InvalidStatement, @@ -288,8 +288,8 @@ pub mod pallet { /// /// Parameters: /// - `dest`: The destination account to payout the claim. - /// - `ethereum_signature`: The signature of an ethereum signed message - /// matching the format described above. + /// - `ethereum_signature`: The signature of an ethereum signed message matching the format + /// described above. 
/// /// /// The weight of this call is invariant over the input parameters. @@ -368,9 +368,10 @@ pub mod pallet { /// /// Parameters: /// - `dest`: The destination account to payout the claim. - /// - `ethereum_signature`: The signature of an ethereum signed message - /// matching the format described above. - /// - `statement`: The identity of the statement which is being attested to in the signature. + /// - `ethereum_signature`: The signature of an ethereum signed message matching the format + /// described above. + /// - `statement`: The identity of the statement which is being attested to in the + /// signature. /// /// /// The weight of this call is invariant over the input parameters. @@ -400,14 +401,16 @@ pub mod pallet { /// Attest to a statement, needed to finalize the claims process. /// - /// WARNING: Insecure unless your chain includes `PrevalidateAttests` as a `SignedExtension`. + /// WARNING: Insecure unless your chain includes `PrevalidateAttests` as a + /// `SignedExtension`. /// /// Unsigned Validation: /// A call to attest is deemed valid if the sender has a `Preclaim` registered /// and provides a `statement` which is expected for the account. /// /// Parameters: - /// - `statement`: The identity of the statement which is being attested to in the signature. + /// - `statement`: The identity of the statement which is being attested to in the + /// signature. /// /// /// The weight of this call is invariant over the input parameters. diff --git a/runtime/common/src/crowdloan/migration.rs b/runtime/common/src/crowdloan/migration.rs index 4a47f3283de3..03c4ab6c3119 100644 --- a/runtime/common/src/crowdloan/migration.rs +++ b/runtime/common/src/crowdloan/migration.rs @@ -134,8 +134,8 @@ pub mod crowdloan_index_migration { Ok(()) } - /// This migration converts crowdloans to use a crowdloan index rather than the parachain id as a - /// unique identifier. This makes it easier to swap two crowdloans between parachains. 
+ /// This migration converts crowdloans to use a crowdloan index rather than the parachain id as + /// a unique identifier. This makes it easier to swap two crowdloans between parachains. pub fn migrate() -> frame_support::weights::Weight { let mut weight = Weight::zero(); diff --git a/runtime/common/src/crowdloan/mod.rs b/runtime/common/src/crowdloan/mod.rs index 18c86e68e5df..1db046c52701 100644 --- a/runtime/common/src/crowdloan/mod.rs +++ b/runtime/common/src/crowdloan/mod.rs @@ -45,9 +45,9 @@ //! slot auction enters its ending period, then parachains will each place a bid; the bid will be //! raised once per block if the parachain had additional funds contributed since the last bid. //! -//! Successful funds remain tracked (in the `Funds` storage item and the associated child trie) as long as -//! the parachain remains active. Users can withdraw their funds once the slot is completed and funds are -//! returned to the crowdloan account. +//! Successful funds remain tracked (in the `Funds` storage item and the associated child trie) as +//! long as the parachain remains active. Users can withdraw their funds once the slot is completed +//! and funds are returned to the crowdloan account. pub mod migration; @@ -164,11 +164,11 @@ pub struct FundInfo { /// If this is `Ending(n)`, this fund received a contribution during the current ending period, /// where `n` is how far into the ending period the contribution was made. pub last_contribution: LastContribution, - /// First lease period in range to bid on; it's actually a `LeasePeriod`, but that's the same type - /// as `BlockNumber`. + /// First lease period in range to bid on; it's actually a `LeasePeriod`, but that's the same + /// type as `BlockNumber`. pub first_period: LeasePeriod, - /// Last lease period in range to bid on; it's actually a `LeasePeriod`, but that's the same type - /// as `BlockNumber`. 
+ /// Last lease period in range to bid on; it's actually a `LeasePeriod`, but that's the same + /// type as `BlockNumber`. pub last_period: LeasePeriod, /// Unique index used to represent this fund. pub fund_index: FundIndex, @@ -192,15 +192,16 @@ pub mod pallet { pub trait Config: frame_system::Config { type RuntimeEvent: From> + IsType<::RuntimeEvent>; - /// `PalletId` for the crowdloan pallet. An appropriate value could be `PalletId(*b"py/cfund")` + /// `PalletId` for the crowdloan pallet. An appropriate value could be + /// `PalletId(*b"py/cfund")` #[pallet::constant] type PalletId: Get; /// The amount to be held on deposit by the depositor of a crowdloan. type SubmissionDeposit: Get>; - /// The minimum amount that may be contributed into a crowdloan. Should almost certainly be at - /// least `ExistentialDeposit`. + /// The minimum amount that may be contributed into a crowdloan. Should almost certainly be + /// at least `ExistentialDeposit`. #[pallet::constant] type MinContribution: Get>; @@ -208,8 +209,8 @@ pub mod pallet { #[pallet::constant] type RemoveKeysLimit: Get; - /// The parachain registrar type. We just use this to ensure that only the manager of a para is able to - /// start a crowdloan for its slot. + /// The parachain registrar type. We just use this to ensure that only the manager of a para + /// is able to start a crowdloan for its slot. type Registrar: Registrar; /// The type representing the auctioning system. @@ -314,7 +315,8 @@ pub mod pallet { FundNotEnded, /// There are no contributions stored in this crowdloan. NoContributions, - /// The crowdloan is not ready to dissolve. Potentially still has a slot or in retirement period. + /// The crowdloan is not ready to dissolve. Potentially still has a slot or in retirement + /// period. NotReadyToDissolve, /// Invalid signature. 
InvalidSignature, @@ -342,8 +344,9 @@ pub mod pallet { for (fund, para_id) in new_raise.into_iter().filter_map(|i| Self::funds(i).map(|f| (f, i))) { - // Care needs to be taken by the crowdloan creator that this function will succeed given - // the crowdloaning configuration. We do some checks ahead of time in crowdloan `create`. + // Care needs to be taken by the crowdloan creator that this function will + // succeed given the crowdloaning configuration. We do some checks ahead of time + // in crowdloan `create`. let result = T::Auctioneer::place_bid( Self::fund_account_id(fund.fund_index), para_id, @@ -363,7 +366,8 @@ pub mod pallet { #[pallet::call] impl Pallet { - /// Create a new crowdloaning campaign for a parachain slot with the given lease period range. + /// Create a new crowdloaning campaign for a parachain slot with the given lease period + /// range. /// /// This applies a lock to your parachain configuration, ensuring that it cannot be changed /// by the parachain manager. @@ -462,16 +466,16 @@ pub mod pallet { /// /// Origin must be signed, but can come from anyone. /// - /// The fund must be either in, or ready for, retirement. For a fund to be *in* retirement, then the retirement - /// flag must be set. For a fund to be ready for retirement, then: + /// The fund must be either in, or ready for, retirement. For a fund to be *in* retirement, + /// then the retirement flag must be set. For a fund to be ready for retirement, then: /// - it must not already be in retirement; /// - the amount of raised funds must be bigger than the _free_ balance of the account; /// - and either: /// - the block number must be at least `end`; or /// - the current lease period must be greater than the fund's `last_period`. /// - /// In this case, the fund's retirement flag is set and its `end` is reset to the current block - /// number. + /// In this case, the fund's retirement flag is set and its `end` is reset to the current + /// block number. 
/// /// - `who`: The account whose contribution should be withdrawn. /// - `index`: The parachain to whose crowdloan the contribution was made. @@ -653,8 +657,9 @@ pub mod pallet { Ok(()) } - /// Contribute your entire balance to a crowd sale. This will transfer the entire balance of a user over to fund a parachain - /// slot. It will be withdrawable when the crowdloan has ended and the funds are unused. + /// Contribute your entire balance to a crowd sale. This will transfer the entire balance of + /// a user over to fund a parachain slot. It will be withdrawable when the crowdloan has + /// ended and the funds are unused. #[pallet::call_index(8)] #[pallet::weight(T::WeightInfo::contribute())] pub fn contribute_all( @@ -719,8 +724,8 @@ impl Pallet { } /// This function checks all conditions which would qualify a crowdloan has ended. - /// * If we have reached the `fund.end` block OR the first lease period the fund is - /// trying to bid for has started already. + /// * If we have reached the `fund.end` block OR the first lease period the fund is trying to + /// bid for has started already. /// * And, if the fund has enough free funds to refund full raised amount. fn ensure_crowdloan_ended( now: BlockNumberFor, @@ -775,8 +780,8 @@ impl Pallet { Error::::BidOrLeaseActive ); - // We disallow any crowdloan contributions during the VRF Period, so that people do not sneak their - // contributions into the auction when it would not impact the outcome. + // We disallow any crowdloan contributions during the VRF Period, so that people do not + // sneak their contributions into the auction when it would not impact the outcome. 
ensure!(!T::Auctioneer::auction_status(now).is_vrf(), Error::::VrfDelayInProgress); let (old_balance, memo) = Self::contribution_get(fund.fund_index, &who); @@ -1287,7 +1292,8 @@ mod tests { ); // Cannot create a crowdloan with nonsense end date - // This crowdloan would end in lease period 2, but is bidding for some slot that starts in lease period 1. + // This crowdloan would end in lease period 2, but is bidding for some slot that starts + // in lease period 1. assert_noop!( Crowdloan::create(RuntimeOrigin::signed(1), para, 1000, 1, 4, 41, None), Error::::EndTooFarInFuture @@ -1457,7 +1463,8 @@ mod tests { let para_2 = new_para(); let index = NextFundIndex::::get(); assert_ok!(Crowdloan::create(RuntimeOrigin::signed(1), para_2, 1000, 1, 4, 40, None)); - // Emulate a win by leasing out and putting a deposit. Slots pallet would normally do this. + // Emulate a win by leasing out and putting a deposit. Slots pallet would normally do + // this. let crowdloan_account = Crowdloan::fund_account_id(index); set_winner(para_2, crowdloan_account, true); assert_noop!( @@ -1465,8 +1472,8 @@ mod tests { Error::::BidOrLeaseActive ); - // Move past lease period 1, should not be allowed to have further contributions with a crowdloan - // that has starting period 1. + // Move past lease period 1, should not be allowed to have further contributions with a + // crowdloan that has starting period 1. 
let para_3 = new_para(); assert_ok!(Crowdloan::create(RuntimeOrigin::signed(1), para_3, 1000, 1, 4, 40, None)); run_to_block(40); diff --git a/runtime/common/src/integration_tests.rs b/runtime/common/src/integration_tests.rs index fa21fbf9ef69..34a49bc230b6 100644 --- a/runtime/common/src/integration_tests.rs +++ b/runtime/common/src/integration_tests.rs @@ -471,7 +471,8 @@ fn basic_end_to_end_works() { ); assert_eq!( slots::Leases::::get(ParaId::from(para_2)), - // -- 1 --- 2 --- 3 --- 4 --- 5 ---------------- 6 --------------------------- 7 ---------------- + // -- 1 --- 2 --- 3 --- 4 --- 5 ---------------- 6 --------------------------- 7 + // ---------------- vec![ None, None, @@ -599,7 +600,8 @@ fn basic_errors_fail() { #[test] fn competing_slots() { - // This test will verify that competing slots, from different sources will resolve appropriately. + // This test will verify that competing slots, from different sources will resolve + // appropriately. new_test_ext().execute_with(|| { assert!(System::block_number().is_one()); let max_bids = 10u32; @@ -789,7 +791,8 @@ fn competing_bids() { let crowdloan_1 = Crowdloan::fund_account_id(fund_1.fund_index); assert_eq!( slots::Leases::::get(ParaId::from(2000)), - // -- 1 --- 2 --- 3 --- 4 --- 5 ------------- 6 ------------------------ 7 ------------- + // -- 1 --- 2 --- 3 --- 4 --- 5 ------------- 6 ------------------------ 7 + // ------------- vec![ None, None, diff --git a/runtime/common/src/paras_registrar.rs b/runtime/common/src/paras_registrar.rs index 550f443a5a78..57d9e21bcf53 100644 --- a/runtime/common/src/paras_registrar.rs +++ b/runtime/common/src/paras_registrar.rs @@ -107,9 +107,9 @@ pub mod pallet { type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// The aggregated origin type must support the `parachains` origin. 
We require that we can - /// infallibly convert between this origin and the system origin, but in reality, they're the - /// same type, we just can't express that to the Rust type system without writing a `where` - /// clause everywhere. + /// infallibly convert between this origin and the system origin, but in reality, they're + /// the same type, we just can't express that to the Rust type system without writing a + /// `where` clause everywhere. type RuntimeOrigin: From<::RuntimeOrigin> + Into::RuntimeOrigin>>; @@ -163,14 +163,15 @@ pub mod pallet { CannotDowngrade, /// Cannot schedule upgrade of parathread to parachain CannotUpgrade, - /// Para is locked from manipulation by the manager. Must use parachain or relay chain governance. + /// Para is locked from manipulation by the manager. Must use parachain or relay chain + /// governance. ParaLocked, /// The ID given for registration has not been reserved. NotReserved, /// Registering parachain with empty code is not allowed. EmptyCode, - /// Cannot perform a parachain slot / lifecycle swap. Check that the state of both paras are - /// correct for the swap to work. + /// Cannot perform a parachain slot / lifecycle swap. Check that the state of both paras + /// are correct for the swap to work. CannotSwap, } @@ -180,8 +181,8 @@ pub mod pallet { /// Amount held on deposit for each para and the original depositor. /// - /// The given account ID is responsible for registering the code and initial head data, but may only do - /// so if it isn't yet registered. (After that, it's up to governance to do so.) + /// The given account ID is responsible for registering the code and initial head data, but may + /// only do so if it isn't yet registered. (After that, it's up to governance to do so.) #[pallet::storage] pub type Paras = StorageMap<_, Twox64Concat, ParaId, ParaInfo>>; @@ -224,8 +225,8 @@ pub mod pallet { /// - `validation_code`: The initial validation code of the parachain/thread. 
/// /// ## Deposits/Fees - /// The origin signed account must reserve a corresponding deposit for the registration. Anything already - /// reserved previously for this para ID is accounted for. + /// The origin signed account must reserve a corresponding deposit for the registration. + /// Anything already reserved previously for this para ID is accounted for. /// /// ## Events /// The `Registered` event is emitted in case of success. @@ -264,7 +265,8 @@ pub mod pallet { /// Deregister a Para Id, freeing all data and returning any deposit. /// - /// The caller must be Root, the `para` owner, or the `para` itself. The para must be a parathread. + /// The caller must be Root, the `para` owner, or the `para` itself. The para must be a + /// parathread. #[pallet::call_index(2)] #[pallet::weight(::WeightInfo::deregister())] pub fn deregister(origin: OriginFor, id: ParaId) -> DispatchResult { @@ -345,17 +347,20 @@ pub mod pallet { /// Reserve a Para Id on the relay chain. /// /// This function will reserve a new Para Id to be owned/managed by the origin account. - /// The origin account is able to register head data and validation code using `register` to create - /// a parathread. Using the Slots pallet, a parathread can then be upgraded to get a parachain slot. + /// The origin account is able to register head data and validation code using `register` to + /// create a parathread. Using the Slots pallet, a parathread can then be upgraded to get a + /// parachain slot. /// /// ## Arguments - /// - `origin`: Must be called by a `Signed` origin. Becomes the manager/owner of the new para ID. + /// - `origin`: Must be called by a `Signed` origin. Becomes the manager/owner of the new + /// para ID. /// /// ## Deposits/Fees /// The origin must reserve a deposit of `ParaDeposit` for the registration. /// /// ## Events - /// The `Reserved` event is emitted in case of success, which provides the ID reserved for use. 
+ /// The `Reserved` event is emitted in case of success, which provides the ID reserved for + /// use. #[pallet::call_index(5)] #[pallet::weight(::WeightInfo::reserve())] pub fn reserve(origin: OriginFor) -> DispatchResult { @@ -369,7 +374,8 @@ pub mod pallet { /// Add a manager lock from a para. This will prevent the manager of a /// para to deregister or swap a para. /// - /// Can be called by Root, the parachain, or the parachain manager if the parachain is unlocked. + /// Can be called by Root, the parachain, or the parachain manager if the parachain is + /// unlocked. #[pallet::call_index(6)] #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] pub fn add_lock(origin: OriginFor, para: ParaId) -> DispatchResult { @@ -380,7 +386,8 @@ pub mod pallet { /// Schedule a parachain upgrade. /// - /// Can be called by Root, the parachain, or the parachain manager if the parachain is unlocked. + /// Can be called by Root, the parachain, or the parachain manager if the parachain is + /// unlocked. #[pallet::call_index(7)] #[pallet::weight(::WeightInfo::schedule_code_upgrade(new_code.0.len() as u32))] pub fn schedule_code_upgrade( @@ -395,7 +402,8 @@ pub mod pallet { /// Set the parachain's current head. /// - /// Can be called by Root, the parachain, or the parachain manager if the parachain is unlocked. + /// Can be called by Root, the parachain, or the parachain manager if the parachain is + /// unlocked. #[pallet::call_index(8)] #[pallet::weight(::WeightInfo::set_current_head(new_head.0.len() as u32))] pub fn set_current_head( diff --git a/runtime/common/src/paras_sudo_wrapper.rs b/runtime/common/src/paras_sudo_wrapper.rs index 8944e932e9ef..d18eb8650aaf 100644 --- a/runtime/common/src/paras_sudo_wrapper.rs +++ b/runtime/common/src/paras_sudo_wrapper.rs @@ -45,8 +45,8 @@ pub mod pallet { ParaDoesntExist, /// The specified parachain or parathread is already registered. 
ParaAlreadyExists, - /// A DMP message couldn't be sent because it exceeds the maximum size allowed for a downward - /// message. + /// A DMP message couldn't be sent because it exceeds the maximum size allowed for a + /// downward message. ExceedsMaxMessageSize, /// Could not schedule para cleanup. CouldntCleanup, @@ -127,8 +127,8 @@ pub mod pallet { /// Send a downward XCM to the given para. /// - /// The given parachain should exist and the payload should not exceed the preconfigured size - /// `config.max_downward_message_size`. + /// The given parachain should exist and the payload should not exceed the preconfigured + /// size `config.max_downward_message_size`. #[pallet::call_index(4)] #[pallet::weight((1_000, DispatchClass::Operational))] pub fn sudo_queue_downward_xcm( diff --git a/runtime/common/src/purchase.rs b/runtime/common/src/purchase.rs index 246511a5d3d8..72795a733ea9 100644 --- a/runtime/common/src/purchase.rs +++ b/runtime/common/src/purchase.rs @@ -82,7 +82,8 @@ pub struct AccountStatus { locked_balance: Balance, /// Their sr25519/ed25519 signature verifying they have signed our required statement. signature: Vec, - /// The percentage of VAT the purchaser is responsible for. This is already factored into account balance. + /// The percentage of VAT the purchaser is responsible for. This is already factored into + /// account balance. vat: Permill, } @@ -333,12 +334,14 @@ pub mod pallet { if !status.locked_balance.is_zero() { let unlock_block = UnlockBlock::::get(); - // We allow some configurable portion of the purchased locked DOTs to be unlocked for basic usage. + // We allow some configurable portion of the purchased locked DOTs to be + // unlocked for basic usage. let unlocked = (T::UnlockedProportion::get() * status.locked_balance) .min(T::MaxUnlocked::get()); let locked = status.locked_balance.saturating_sub(unlocked); - // We checked that this account has no existing vesting schedule. 
So this function should - // never fail, however if it does, not much we can do about it at this point. + // We checked that this account has no existing vesting schedule. So this + // function should never fail, however if it does, not much we can do about + // it at this point. let _ = T::VestingSchedule::add_vesting_schedule( // Apply vesting schedule to this user &who, @@ -351,7 +354,8 @@ pub mod pallet { ); } - // Setting the user account to `Completed` ends the purchase process for this user. + // Setting the user account to `Completed` ends the purchase process for this + // user. status.validity = AccountValidity::Completed; Self::deposit_event(Event::::PaymentComplete { who: who.clone(), @@ -645,17 +649,20 @@ mod tests { } fn alice_signature() -> [u8; 64] { - // echo -n "Hello, World" | subkey -s sign "bottom drive obey lake curtain smoke basket hold race lonely fit walk//Alice" + // echo -n "Hello, World" | subkey -s sign "bottom drive obey lake curtain smoke basket hold + // race lonely fit walk//Alice" hex_literal::hex!("20e0faffdf4dfe939f2faa560f73b1d01cde8472e2b690b7b40606a374244c3a2e9eb9c8107c10b605138374003af8819bd4387d7c24a66ee9253c2e688ab881") } fn bob_signature() -> [u8; 64] { - // echo -n "Hello, World" | subkey -s sign "bottom drive obey lake curtain smoke basket hold race lonely fit walk//Bob" + // echo -n "Hello, World" | subkey -s sign "bottom drive obey lake curtain smoke basket hold + // race lonely fit walk//Bob" hex_literal::hex!("d6d460187ecf530f3ec2d6e3ac91b9d083c8fbd8f1112d92a82e4d84df552d18d338e6da8944eba6e84afaacf8a9850f54e7b53a84530d649be2e0119c7ce889") } fn alice_signature_ed25519() -> [u8; 64] { - // echo -n "Hello, World" | subkey -e sign "bottom drive obey lake curtain smoke basket hold race lonely fit walk//Alice" + // echo -n "Hello, World" | subkey -e sign "bottom drive obey lake curtain smoke basket hold + // race lonely fit walk//Alice" 
hex_literal::hex!("ee3f5a6cbfc12a8f00c18b811dc921b550ddf272354cda4b9a57b1d06213fcd8509f5af18425d39a279d13622f14806c3e978e2163981f2ec1c06e9628460b0e") } diff --git a/runtime/common/src/slots/mod.rs b/runtime/common/src/slots/mod.rs index 0be75fcba2b1..b4e136b1211c 100644 --- a/runtime/common/src/slots/mod.rs +++ b/runtime/common/src/slots/mod.rs @@ -14,12 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Parathread and parachains leasing system. Allows para IDs to be claimed, the code and data to be initialized and -//! parachain slots (i.e. continuous scheduling) to be leased. Also allows for parachains and parathreads to be -//! swapped. +//! Parathread and parachains leasing system. Allows para IDs to be claimed, the code and data to be +//! initialized and parachain slots (i.e. continuous scheduling) to be leased. Also allows for +//! parachains and parathreads to be swapped. //! -//! This doesn't handle the mechanics of determining which para ID actually ends up with a parachain lease. This -//! must handled by a separately, through the trait interface that this pallet provides or the root dispatchables. +//! This doesn't handle the mechanics of determining which para ID actually ends up with a parachain +//! lease. This must handled by a separately, through the trait interface that this pallet provides +//! or the root dispatchables. pub mod migration; @@ -98,8 +99,8 @@ pub mod pallet { /// Amounts held on deposit for each (possibly future) leased parachain. /// - /// The actual amount locked on its behalf by any account at any time is the maximum of the second values - /// of the items in this list whose first value is the account. + /// The actual amount locked on its behalf by any account at any time is the maximum of the + /// second values of the items in this list whose first value is the account. /// /// The first item in the list is the amount locked for the current Lease Period. 
Following /// items are for the subsequent lease periods. @@ -160,8 +161,8 @@ pub mod pallet { #[pallet::call] impl Pallet { - /// Just a connect into the `lease_out` call, in case Root wants to force some lease to happen - /// independently of any other on-chain mechanism to use it. + /// Just a connect into the `lease_out` call, in case Root wants to force some lease to + /// happen independently of any other on-chain mechanism to use it. /// /// The dispatch origin for this call must match `T::ForceOrigin`. #[pallet::call_index(0)] @@ -268,8 +269,8 @@ impl Pallet { // deposit for the parachain. let now_held = Self::deposit_held(para, &ended_lease.0); - // If this is less than what we were holding for this leaser's now-ended lease, then - // unreserve it. + // If this is less than what we were holding for this leaser's now-ended lease, + // then unreserve it. if let Some(rebate) = ended_lease.1.checked_sub(&now_held) { T::Currency::unreserve(&ended_lease.0, rebate); } @@ -392,8 +393,8 @@ impl Leaser> for Pallet { } } - // Figure out whether we already have some funds of `leaser` held in reserve for `para_id`. - // If so, then we can deduct those from the amount that we need to reserve. + // Figure out whether we already have some funds of `leaser` held in reserve for + // `para_id`. If so, then we can deduct those from the amount that we need to reserve. let maybe_additional = amount.checked_sub(&Self::deposit_held(para, &leaser)); if let Some(ref additional) = maybe_additional { T::Currency::reserve(&leaser, *additional) @@ -403,7 +404,8 @@ impl Leaser> for Pallet { let reserved = maybe_additional.unwrap_or_default(); // Check if current lease period is same as period begin, and onboard them directly. - // This will allow us to support onboarding new parachains in the middle of a lease period. + // This will allow us to support onboarding new parachains in the middle of a lease + // period. if current_lease_period == period_begin { // Best effort. 
Not much we can do if this fails. let _ = T::Registrar::make_parachain(para); @@ -481,7 +483,8 @@ impl Leaser> for Pallet { None => return true, }; - // Get the leases, and check each item in the vec which is part of the range we are checking. + // Get the leases, and check each item in the vec which is part of the range we are + // checking. let leases = Leases::::get(para_id); for slot in offset..=offset + period_count { if let Some(Some(_)) = leases.get(slot) { diff --git a/runtime/common/src/traits.rs b/runtime/common/src/traits.rs index f24a5b977968..940c3dfa2fb3 100644 --- a/runtime/common/src/traits.rs +++ b/runtime/common/src/traits.rs @@ -113,11 +113,12 @@ pub trait Leaser { /// /// `leaser` shall have a total of `amount` balance reserved by the implementer of this trait. /// - /// Note: The implementer of the trait (the leasing system) is expected to do all reserve/unreserve calls. The - /// caller of this trait *SHOULD NOT* pre-reserve the deposit (though should ensure that it is reservable). + /// Note: The implementer of the trait (the leasing system) is expected to do all + /// reserve/unreserve calls. The caller of this trait *SHOULD NOT* pre-reserve the deposit + /// (though should ensure that it is reservable). /// - /// The lease will last from `period_begin` for `period_count` lease periods. It is undefined if the `para` - /// already has a slot leased during those periods. + /// The lease will last from `period_begin` for `period_count` lease periods. It is undefined if + /// the `para` already has a slot leased during those periods. /// /// Returns `Err` in the case of an error, and in which case nothing is changed. fn lease_out( @@ -128,8 +129,8 @@ pub trait Leaser { period_count: Self::LeasePeriod, ) -> Result<(), LeaseError>; - /// Return the amount of balance currently held in reserve on `leaser`'s account for leasing `para`. This won't - /// go down outside a lease period. 
+ /// Return the amount of balance currently held in reserve on `leaser`'s account for leasing + /// `para`. This won't go down outside a lease period. fn deposit_held( para: ParaId, leaser: &Self::AccountId, @@ -147,7 +148,8 @@ pub trait Leaser { fn lease_period_index(block: BlockNumber) -> Option<(Self::LeasePeriod, bool)>; /// Returns true if the parachain already has a lease in any of lease periods in the inclusive - /// range `[first_period, last_period]`, intersected with the unbounded range [`current_lease_period`..] . + /// range `[first_period, last_period]`, intersected with the unbounded range + /// [`current_lease_period`..] . fn already_leased( para_id: ParaId, first_period: Self::LeasePeriod, @@ -169,7 +171,8 @@ pub enum AuctionStatus { /// will be `EndingPeriod(1, 5)`. EndingPeriod(BlockNumber, BlockNumber), /// We have completed the bidding process and are waiting for the VRF to return some acceptable - /// randomness to select the winner. The number represents how many blocks we have been waiting. + /// randomness to select the winner. The number represents how many blocks we have been + /// waiting. VrfDelay(BlockNumber), } @@ -224,9 +227,9 @@ pub trait Auctioneer { /// - `last_slot`: The last lease period index of the range to be bid on (inclusive). /// - `amount`: The total amount to be the bid for deposit over the range. /// - /// The account `Bidder` must have at least `amount` available as a free balance in `Currency`. The - /// implementation *MUST* remove or reserve `amount` funds from `bidder` and those funds should be returned - /// or freed once the bid is rejected or lease has ended. + /// The account `Bidder` must have at least `amount` available as a free balance in `Currency`. + /// The implementation *MUST* remove or reserve `amount` funds from `bidder` and those funds + /// should be returned or freed once the bid is rejected or lease has ended. 
fn place_bid( bidder: Self::AccountId, para: ParaId, diff --git a/runtime/kusama/src/xcm_config.rs b/runtime/kusama/src/xcm_config.rs index 59e32f2ca544..5725f54eddd5 100644 --- a/runtime/kusama/src/xcm_config.rs +++ b/runtime/kusama/src/xcm_config.rs @@ -63,8 +63,8 @@ parameter_types! { pub LocalCheckAccount: (AccountId, MintLocation) = (CheckAccount::get(), MintLocation::Local); } -/// The canonical means of converting a `MultiLocation` into an `AccountId`, used when we want to determine -/// the sovereign account controlled by a location. +/// The canonical means of converting a `MultiLocation` into an `AccountId`, used when we want to +/// determine the sovereign account controlled by a location. pub type SovereignAccountOf = ( // We can convert a child parachain using the standard `AccountId` conversion. ChildParachainConvertsVia, @@ -72,8 +72,8 @@ pub type SovereignAccountOf = ( AccountId32Aliases, ); -/// Our asset transactor. This is what allows us to interest with the runtime facilities from the point of -/// view of XCM-only concepts like `MultiLocation` and `MultiAsset`. +/// Our asset transactor. This is what allows us to interest with the runtime facilities from the +/// point of view of XCM-only concepts like `MultiLocation` and `MultiAsset`. /// /// Ours is only aware of the Balances pallet, which is mapped to `TokenLocation`. pub type LocalAssetTransactor = XcmCurrencyAdapter< @@ -360,8 +360,8 @@ parameter_types! { pub ReachableDest: Option = Some(Parachain(1000).into()); } -/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior location -/// of this chain. +/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior +/// location of this chain. 
pub type LocalOriginToLocation = ( // And a usual Signed origin to be used in XCM as a corresponding AccountId32 SignedToAccountId32, @@ -374,8 +374,8 @@ pub type StakingAdminToPlurality = /// Type to convert the Fellows origin to a Plurality `MultiLocation` value. pub type FellowsToPlurality = OriginToPluralityVoice; -/// Type to convert a pallet `Origin` type value into a `MultiLocation` value which represents an interior location -/// of this chain for a destination chain. +/// Type to convert a pallet `Origin` type value into a `MultiLocation` value which represents an +/// interior location of this chain for a destination chain. pub type LocalPalletOriginToLocation = ( // StakingAdmin origin to be used in XCM as a corresponding Plurality `MultiLocation` value. StakingAdminToPlurality, @@ -386,16 +386,17 @@ pub type LocalPalletOriginToLocation = ( impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; // We only allow the root, the council, fellows and the staking admin to send messages. - // This is basically safe to enable for everyone (safe the possibility of someone spamming the parachain - // if they're willing to pay the KSM to send from the Relay-chain), but it's useless until we bring in XCM v3 - // which will make `DescendOrigin` a bit more useful. + // This is basically safe to enable for everyone (safe the possibility of someone spamming the + // parachain if they're willing to pay the KSM to send from the Relay-chain), but it's useless + // until we bring in XCM v3 which will make `DescendOrigin` a bit more useful. type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; type XcmRouter = XcmRouter; // Anyone can execute XCM messages locally. type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin; type XcmExecuteFilter = Everything; type XcmExecutor = xcm_executor::XcmExecutor; - // Anyone is able to use teleportation regardless of who they are and what they want to teleport. 
+ // Anyone is able to use teleportation regardless of who they are and what they want to + // teleport. type XcmTeleportFilter = Everything; // Anyone is able to use reserve transfers regardless of who they are and what they want to // transfer. @@ -450,7 +451,8 @@ fn karura_liquid_staking_xcm_has_sane_weight_upper_limt() { else { panic!("no Transact instruction found") }; - // should be pallet_utility.as_derivative { index: 0, call: pallet_staking::bond_extra { max_additional: 2490000000000 } } + // should be pallet_utility.as_derivative { index: 0, call: pallet_staking::bond_extra { + // max_additional: 2490000000000 } } let message_call = call.take_decoded().expect("can't decode Transact call"); let call_weight = message_call.get_dispatch_info().weight; // Ensure that the Transact instruction is giving a sensible `require_weight_at_most` value diff --git a/runtime/parachains/src/builder.rs b/runtime/parachains/src/builder.rs index e46c9f59b957..892e934e6dfc 100644 --- a/runtime/parachains/src/builder.rs +++ b/runtime/parachains/src/builder.rs @@ -174,7 +174,8 @@ impl BenchBuilder { configuration::Pallet::::config().max_validators.unwrap_or(200) } - /// Maximum number of validators participating in parachains consensus (a.k.a. active validators). + /// Maximum number of validators participating in parachains consensus (a.k.a. active + /// validators). fn max_validators(&self) -> u32 { self.max_validators.unwrap_or(Self::fallback_max_validators()) } @@ -186,8 +187,8 @@ impl BenchBuilder { self } - /// Maximum number of validators per core (a.k.a. max validators per group). This value is used if none is - /// explicitly set on the builder. + /// Maximum number of validators per core (a.k.a. max validators per group). This value is used + /// if none is explicitly set on the builder. 
pub(crate) fn fallback_max_validators_per_core() -> u32 { configuration::Pallet::::config().max_validators_per_core.unwrap_or(5) } @@ -479,7 +480,8 @@ impl BenchBuilder { /// Create backed candidates for `cores_with_backed_candidates`. You need these cores to be /// scheduled _within_ paras inherent, which requires marking the available bitfields as fully /// available. - /// - `cores_with_backed_candidates` Mapping of `para_id`/`core_idx`/`group_idx` seed to number of + /// - `cores_with_backed_candidates` Mapping of `para_id`/`core_idx`/`group_idx` seed to number + /// of /// validity votes. fn create_backed_candidates( &self, @@ -687,9 +689,9 @@ impl BenchBuilder { ); assert_eq!(inclusion::PendingAvailability::::iter().count(), used_cores as usize,); - // Mark all the used cores as occupied. We expect that their are `backed_and_concluding_cores` - // that are pending availability and that there are `used_cores - backed_and_concluding_cores ` - // which are about to be disputed. + // Mark all the used cores as occupied. We expect that their are + // `backed_and_concluding_cores` that are pending availability and that there are + // `used_cores - backed_and_concluding_cores ` which are about to be disputed. scheduler::AvailabilityCores::::set(vec![ Some(CoreOccupied::Parachain); used_cores as usize diff --git a/runtime/parachains/src/configuration.rs b/runtime/parachains/src/configuration.rs index 38a24211fb67..d4ad8619f16e 100644 --- a/runtime/parachains/src/configuration.rs +++ b/runtime/parachains/src/configuration.rs @@ -54,12 +54,12 @@ const LOG_TARGET: &str = "runtime::configuration"; serde::Deserialize, )] pub struct HostConfiguration { - // NOTE: This structure is used by parachains via merkle proofs. Therefore, this struct requires - // special treatment. + // NOTE: This structure is used by parachains via merkle proofs. Therefore, this struct + // requires special treatment. 
// - // A parachain requested this struct can only depend on the subset of this struct. Specifically, - // only a first few fields can be depended upon. These fields cannot be changed without - // corresponding migration of the parachains. + // A parachain requested this struct can only depend on the subset of this struct. + // Specifically, only a first few fields can be depended upon. These fields cannot be changed + // without corresponding migration of the parachains. /** * The parameters that are required for the parachains. */ @@ -88,9 +88,9 @@ pub struct HostConfiguration { pub hrmp_max_message_num_per_candidate: u32, /// The minimum period, in blocks, between which parachains can update their validation code. /// - /// This number is used to prevent parachains from spamming the relay chain with validation code - /// upgrades. The only thing it controls is the number of blocks the `UpgradeRestrictionSignal` - /// is set for the parachain in question. + /// This number is used to prevent parachains from spamming the relay chain with validation + /// code upgrades. The only thing it controls is the number of blocks the + /// `UpgradeRestrictionSignal` is set for the parachain in question. /// /// If PVF pre-checking is enabled this should be greater than the maximum number of blocks /// PVF pre-checking can take. Intuitively, this number should be greater than the duration @@ -113,14 +113,15 @@ pub struct HostConfiguration { /// been completed. /// /// Note, there are situations in which `expected_at` in the past. For example, if - /// [`chain_availability_period`] or [`thread_availability_period`] is less than the delay set by - /// this field or if PVF pre-check took more time than the delay. In such cases, the upgrade is - /// further at the earliest possible time determined by [`minimum_validation_upgrade_delay`]. 
+ /// [`chain_availability_period`] or [`thread_availability_period`] is less than the delay set + /// by this field or if PVF pre-check took more time than the delay. In such cases, the upgrade + /// is further at the earliest possible time determined by + /// [`minimum_validation_upgrade_delay`]. /// /// The rationale for this delay has to do with relay-chain reversions. In case there is an - /// invalid candidate produced with the new version of the code, then the relay-chain can revert - /// [`validation_upgrade_delay`] many blocks back and still find the new code in the storage by - /// hash. + /// invalid candidate produced with the new version of the code, then the relay-chain can + /// revert [`validation_upgrade_delay`] many blocks back and still find the new code in the + /// storage by hash. /// /// [#4601]: https://github.com/paritytech/polkadot/issues/4601 pub validation_upgrade_delay: BlockNumber, @@ -179,13 +180,13 @@ pub struct HostConfiguration { /// Must be non-zero. pub group_rotation_frequency: BlockNumber, /// The availability period, in blocks, for parachains. This is the amount of blocks - /// after inclusion that validators have to make the block available and signal its availability to - /// the chain. + /// after inclusion that validators have to make the block available and signal its + /// availability to the chain. /// /// Must be at least 1. pub chain_availability_period: BlockNumber, - /// The availability period, in blocks, for parathreads. Same as the `chain_availability_period`, - /// but a differing timeout due to differing requirements. + /// The availability period, in blocks, for parathreads. Same as the + /// `chain_availability_period`, but a differing timeout due to differing requirements. /// /// Must be at least 1. pub thread_availability_period: BlockNumber, @@ -217,8 +218,8 @@ pub struct HostConfiguration { pub needed_approvals: u32, /// The number of samples to do of the `RelayVRFModulo` approval assignment criterion. 
pub relay_vrf_modulo_samples: u32, - /// If an active PVF pre-checking vote observes this many number of sessions it gets automatically - /// rejected. + /// If an active PVF pre-checking vote observes this many number of sessions it gets + /// automatically rejected. /// /// 0 means PVF pre-checking will be rejected on the first observed session unless the voting /// gained supermajority before that the session change. @@ -849,7 +850,8 @@ pub mod pallet { }) } - /// Sets the maximum total size of items that can present in a upward dispatch queue at once. + /// Sets the maximum total size of items that can present in a upward dispatch queue at + /// once. #[pallet::call_index(24)] #[pallet::weight(( T::WeightInfo::set_config_with_u32(), @@ -1257,8 +1259,8 @@ impl Pallet { // 3. pending_configs = [(cur+1, X)] // There is a pending configuration scheduled and it will be applied in the next session. // - // We will use X as the base configuration. We need to schedule a new configuration change - // for the `scheduled_session` and use X as the base for the new configuration. + // We will use X as the base configuration. We need to schedule a new configuration + // change for the `scheduled_session` and use X as the base for the new configuration. // // 4. pending_configs = [(cur+1, X), (cur+2, Y)] // There is a pending configuration change in the next session and for the scheduled diff --git a/runtime/parachains/src/configuration/migration/v7.rs b/runtime/parachains/src/configuration/migration/v7.rs index cdff80a31a3a..78a7cf9e4dc0 100644 --- a/runtime/parachains/src/configuration/migration/v7.rs +++ b/runtime/parachains/src/configuration/migration/v7.rs @@ -182,10 +182,12 @@ mod tests { // Steps: // 1. Go to Polkadot.js -> Developer -> Chain state -> Storage: https://polkadot.js.org/apps/#/chainstate // 2. Set these parameters: - // 2.1. selected state query: configuration; activeConfig(): PolkadotRuntimeParachainsConfigurationHostConfiguration - // 2.2. 
blockhash to query at: 0xf89d3ab5312c5f70d396dc59612f0aa65806c798346f9db4b35278baed2e0e53 (the hash of the block) - // 2.3. Note the value of encoded storage key -> 0x06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385 for the referenced block. - // 2.4. You'll also need the decoded values to update the test. + // 2.1. selected state query: configuration; activeConfig(): + // PolkadotRuntimeParachainsConfigurationHostConfiguration 2.2. blockhash to query at: + // 0xf89d3ab5312c5f70d396dc59612f0aa65806c798346f9db4b35278baed2e0e53 (the hash of the + // block) 2.3. Note the value of encoded storage key -> + // 0x06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385 for the referenced + // block. 2.4. You'll also need the decoded values to update the test. // 3. Go to Polkadot.js -> Developer -> Chain state -> Raw storage // 3.1 Enter the encoded storage key and you get the raw config. @@ -196,8 +198,8 @@ mod tests { let v6 = V6HostConfiguration::::decode(&mut &raw_config[..]).unwrap(); - // We check only a sample of the values here. If we missed any fields or messed up data types - // that would skew all the fields coming after. + // We check only a sample of the values here. If we missed any fields or messed up data + // types that would skew all the fields coming after. assert_eq!(v6.max_code_size, 3_145_728); assert_eq!(v6.validation_upgrade_cooldown, 200); assert_eq!(v6.max_pov_size, 5_242_880); @@ -209,8 +211,8 @@ mod tests { #[test] fn test_migrate_to_v7() { - // Host configuration has lots of fields. However, in this migration we only remove one field. - // The most important part to check are a couple of the last fields. We also pick + // Host configuration has lots of fields. However, in this migration we only remove one + // field. The most important part to check are a couple of the last fields. We also pick // extra fields to check arbitrarily, e.g. depending on their position (i.e. the middle) and // also their type. 
// @@ -291,7 +293,8 @@ mod tests { }); } - // Test that migration doesn't panic in case there're no pending configurations upgrades in pallet's storage. + // Test that migration doesn't panic in case there're no pending configurations upgrades in + // pallet's storage. #[test] fn test_migrate_to_v7_no_pending() { let v6 = V6HostConfiguration::::default(); diff --git a/runtime/parachains/src/disputes.rs b/runtime/parachains/src/disputes.rs index 7b03cde8ed28..cf2e99e7359a 100644 --- a/runtime/parachains/src/disputes.rs +++ b/runtime/parachains/src/disputes.rs @@ -887,8 +887,8 @@ impl Pallet { #[allow(deprecated)] >::remove_prefix(to_prune, None); - // This is larger, and will be extracted to the `shared` pallet for more proper pruning. - // TODO: https://github.com/paritytech/polkadot/issues/3469 + // This is larger, and will be extracted to the `shared` pallet for more proper + // pruning. TODO: https://github.com/paritytech/polkadot/issues/3469 #[allow(deprecated)] >::remove_prefix(to_prune, None); } @@ -1178,7 +1178,8 @@ impl Pallet { >::insert(&session, &candidate_hash, &summary.state); - // Freeze if the INVALID votes against some local candidate are above the byzantine threshold + // Freeze if the INVALID votes against some local candidate are above the byzantine + // threshold if summary.new_flags.contains(DisputeStateFlags::AGAINST_BYZANTINE) { if let Some(revert_to) = >::get(&session, &candidate_hash) { Self::revert_and_freeze(revert_to); diff --git a/runtime/parachains/src/disputes/migration.rs b/runtime/parachains/src/disputes/migration.rs index af216fa0408e..ccd367e41b36 100644 --- a/runtime/parachains/src/disputes/migration.rs +++ b/runtime/parachains/src/disputes/migration.rs @@ -79,14 +79,16 @@ pub mod v1 { } } - /// Migrates the pallet storage to the most recent version, checking and setting the `StorageVersion`. + /// Migrates the pallet storage to the most recent version, checking and setting the + /// `StorageVersion`. 
pub fn migrate_to_v1() -> Weight { let mut weight: Weight = Weight::zero(); // SpamSlots should not contain too many keys so removing everything at once should be safe let res = SpamSlots::::clear(u32::MAX, None); // `loops` is the number of iterations => used to calculate read weights - // `backend` is the number of keys removed from the backend => used to calculate write weights + // `backend` is the number of keys removed from the backend => used to calculate write + // weights weight = weight .saturating_add(T::DbWeight::get().reads_writes(res.loops as u64, res.backend as u64)); diff --git a/runtime/parachains/src/disputes/tests.rs b/runtime/parachains/src/disputes/tests.rs index 93dcd58264b2..acdba343274c 100644 --- a/runtime/parachains/src/disputes/tests.rs +++ b/runtime/parachains/src/disputes/tests.rs @@ -871,7 +871,8 @@ mod unconfirmed_disputes { use assert_matches::assert_matches; use sp_runtime::ModuleError; - // Shared initialization code between `test_unconfirmed_are_ignored` and `test_unconfirmed_disputes_cause_block_import_error` + // Shared initialization code between `test_unconfirmed_are_ignored` and + // `test_unconfirmed_disputes_cause_block_import_error` fn generate_dispute_statement_set_and_run_to_block() -> DisputeStatementSet { // 7 validators needed for byzantine threshold of 2. 
let v0 = ::Pair::generate().0; @@ -2060,7 +2061,8 @@ fn deduplication_and_sorting_works() { ) .unwrap_err(); - // assert ordering of local only disputes, and at the same time, and being free of duplicates + // assert ordering of local only disputes, and at the same time, and being free of + // duplicates assert_eq!(disputes_orig.len(), disputes.len() + 1); let are_these_equal = |a: &DisputeStatementSet, b: &DisputeStatementSet| { diff --git a/runtime/parachains/src/hrmp.rs b/runtime/parachains/src/hrmp.rs index c876749e853d..1be2fe57b1df 100644 --- a/runtime/parachains/src/hrmp.rs +++ b/runtime/parachains/src/hrmp.rs @@ -117,12 +117,12 @@ pub struct HrmpOpenChannelRequest { #[derive(Encode, Decode, TypeInfo)] #[cfg_attr(test, derive(Debug))] pub struct HrmpChannel { - // NOTE: This structure is used by parachains via merkle proofs. Therefore, this struct requires - // special treatment. + // NOTE: This structure is used by parachains via merkle proofs. Therefore, this struct + // requires special treatment. // - // A parachain requested this struct can only depend on the subset of this struct. Specifically, - // only a first few fields can be depended upon (See `AbridgedHrmpChannel`). These fields cannot - // be changed without corresponding migration of parachains. + // A parachain requested this struct can only depend on the subset of this struct. + // Specifically, only a first few fields can be depended upon (See `AbridgedHrmpChannel`). + // These fields cannot be changed without corresponding migration of parachains. /// The maximum number of messages that can be pending in the channel at once. pub max_capacity: u32, /// The maximum total size of the messages that can be pending in the channel at once. @@ -370,7 +370,8 @@ pub mod pallet { /// The HRMP watermark associated with each para. /// Invariant: - /// - each para `P` used here as a key should satisfy `Paras::is_valid_para(P)` within a session. 
+ /// - each para `P` used here as a key should satisfy `Paras::is_valid_para(P)` within a + /// session. #[pallet::storage] pub type HrmpWatermarks = StorageMap<_, Twox64Concat, ParaId, BlockNumberFor>; @@ -968,9 +969,9 @@ impl Pallet { out_hrmp_msgs.iter().enumerate().map(|(idx, out_msg)| (idx as u32, out_msg)) { match last_recipient { - // the messages must be sorted in ascending order and there must be no two messages sent - // to the same recipient. Thus we can check that every recipient is strictly greater than - // the previous one. + // the messages must be sorted in ascending order and there must be no two messages + // sent to the same recipient. Thus we can check that every recipient is strictly + // greater than the previous one. Some(last_recipient) if out_msg.recipient <= last_recipient => return Err(OutboundHrmpAcceptanceErr::NotSorted { idx }), _ => last_recipient = Some(out_msg.recipient), diff --git a/runtime/parachains/src/inclusion/mod.rs b/runtime/parachains/src/inclusion/mod.rs index c71657d1ac43..f4ef3b95065e 100644 --- a/runtime/parachains/src/inclusion/mod.rs +++ b/runtime/parachains/src/inclusion/mod.rs @@ -17,8 +17,8 @@ //! The inclusion pallet is responsible for inclusion and availability of scheduled parachains //! and parathreads. //! -//! It is responsible for carrying candidates from being backable to being backed, and then from backed -//! to included. +//! It is responsible for carrying candidates from being backable to being backed, and then from +//! backed to included. use crate::{ configuration::{self, HostConfiguration}, @@ -76,8 +76,8 @@ impl WeightInfo for () { /// Maximum value that `config.max_upward_message_size` can be set to. /// -/// This is used for benchmarking sanely bounding relevant storage items. It is expected from the `configuration` -/// pallet to check these values before setting. +/// This is used for benchmarking sanely bounding relevant storage items. 
It is expected from the +/// `configuration` pallet to check these values before setting. pub const MAX_UPWARD_MESSAGE_SIZE_BOUND: u32 = 128 * 1024; /// A bitfield signed by a validator indicating that it is keeping its piece of the erasure-coding @@ -354,8 +354,8 @@ pub mod pallet { InvalidOutboundHrmp, /// The validation code hash of the candidate is not valid. InvalidValidationCodeHash, - /// The `para_head` hash in the candidate descriptor doesn't match the hash of the actual para head in the - /// commitments. + /// The `para_head` hash in the candidate descriptor doesn't match the hash of the actual + /// para head in the commitments. ParaHeadMismatch, /// A bitfield that references a freed core, /// either intentionally or as part of a concluded @@ -492,8 +492,8 @@ impl Pallet { /// /// Updates storage items `PendingAvailability` and `AvailabilityBitfields`. /// - /// Returns a `Vec` of `CandidateHash`es and their respective `AvailabilityCore`s that became available, - /// and cores free. + /// Returns a `Vec` of `CandidateHash`es and their respective `AvailabilityCore`s that became + /// available, and cores free. pub(crate) fn update_pending_availability_and_get_freed_cores( expected_bits: usize, validators: &[ValidatorId], @@ -530,8 +530,8 @@ impl Pallet { continue }; - // defensive check - this is constructed by loading the availability bitfield record, - // which is always `Some` if the core is occupied - that's why we're here. + // defensive check - this is constructed by loading the availability bitfield + // record, which is always `Some` if the core is occupied - that's why we're here. let validator_index = validator_index.0 as usize; if let Some(mut bit) = pending_availability.as_mut().and_then(|candidate_pending_availability| { @@ -591,8 +591,8 @@ impl Pallet { freed_cores } - /// Process candidates that have been backed. Provide the relay storage root, a set of candidates - /// and scheduled cores. + /// Process candidates that have been backed. 
Provide the relay storage root, a set of + /// candidates and scheduled cores. /// /// Both should be sorted ascending by core index, and the candidates should be a subset of /// scheduled cores. If these conditions are not met, the execution of the function fails. @@ -968,7 +968,8 @@ impl Pallet { }) } // make sure that the queue is not overfilled. - // we do it here only once since returning false invalidates the whole relay-chain block. + // we do it here only once since returning false invalidates the whole relay-chain + // block. if para_queue_size.saturating_add(msg_size as u64) > config.max_upward_queue_size as u64 { return Err(UmpAcceptanceCheckErr::TotalSizeExceeded { diff --git a/runtime/parachains/src/initializer.rs b/runtime/parachains/src/initializer.rs index b9ecc3038ca2..e006c38e6dec 100644 --- a/runtime/parachains/src/initializer.rs +++ b/runtime/parachains/src/initializer.rs @@ -17,7 +17,8 @@ //! This module is responsible for maintaining a consistent initialization order for all other //! parachains modules. It's also responsible for finalization and session change notifications. //! -//! This module can throw fatal errors if session-change notifications are received after initialization. +//! This module can throw fatal errors if session-change notifications are received after +//! initialization. use crate::{ configuration::{self, HostConfiguration}, @@ -128,9 +129,9 @@ pub mod pallet { /// Semantically a `bool`, but this guarantees it should never hit the trie, /// as this is cleared in `on_finalize` and Frame optimizes `None` values to be empty values. /// - /// As a `bool`, `set(false)` and `remove()` both lead to the next `get()` being false, but one of - /// them writes to the trie and one does not. This confusion makes `Option<()>` more suitable for - /// the semantics of this variable. + /// As a `bool`, `set(false)` and `remove()` both lead to the next `get()` being false, but one + /// of them writes to the trie and one does not. 
This confusion makes `Option<()>` more suitable + /// for the semantics of this variable. #[pallet::storage] pub(super) type HasInitialized = StorageValue<_, ()>; @@ -190,7 +191,8 @@ pub mod pallet { // Apply buffered session changes as the last thing. This way the runtime APIs and the // next block will observe the next session. // - // Note that we only apply the last session as all others lasted less than a block (weirdly). + // Note that we only apply the last session as all others lasted less than a block + // (weirdly). if let Some(BufferedSessionChange { session_index, validators, queued }) = BufferedSessionChanges::::take().pop() { diff --git a/runtime/parachains/src/origin.rs b/runtime/parachains/src/origin.rs index 14f8c3786c96..c83fec1b8923 100644 --- a/runtime/parachains/src/origin.rs +++ b/runtime/parachains/src/origin.rs @@ -38,7 +38,6 @@ where /// belongs to. /// /// This module fulfills only the single purpose of housing the `Origin` in `construct_runtime`. -/// // ideally, though, the `construct_runtime` should support a free-standing origin. #[frame_support::pallet] pub mod pallet { diff --git a/runtime/parachains/src/paras/mod.rs b/runtime/parachains/src/paras/mod.rs index 98c5075a4c94..4570bb2b13bd 100644 --- a/runtime/parachains/src/paras/mod.rs +++ b/runtime/parachains/src/paras/mod.rs @@ -43,10 +43,10 @@ //! //! The conditions that must be met before the para can use the new validation code are: //! -//! 1. The validation code should have been "soaked" in the storage for a given number of blocks. That -//! is, the validation code should have been stored in on-chain storage for some time, so that in -//! case of a revert with a non-extreme height difference, that validation code can still be -//! found on-chain. +//! 1. The validation code should have been "soaked" in the storage for a given number of blocks. +//! That is, the validation code should have been stored in on-chain storage for some time, so +//! 
that in case of a revert with a non-extreme height difference, that validation code can still +//! be found on-chain. //! //! 2. The validation code was vetted by the validators and declared as non-malicious in a processes //! known as PVF pre-checking. @@ -105,7 +105,6 @@ //! start──────▶│reset│ //! └─────┘ //! ``` -//! use crate::{ configuration, @@ -152,8 +151,8 @@ pub struct ReplacementTimes { /// first parablock included with a relay-parent with number >= this value. expected_at: N, /// The relay-chain block number at which the parablock activating the code upgrade was - /// actually included. This means considered included and available, so this is the time at which - /// that parablock enters the acceptance period in this fork of the relay-chain. + /// actually included. This means considered included and available, so this is the time at + /// which that parablock enters the acceptance period in this fork of the relay-chain. activated_at: N, } @@ -332,7 +331,8 @@ impl<'de> Deserialize<'de> for ParaKind { } } -// Manual encoding, decoding, and TypeInfo as the parakind field in ParaGenesisArgs used to be a bool +// Manual encoding, decoding, and TypeInfo as the parakind field in ParaGenesisArgs used to be a +// bool impl Encode for ParaKind { fn size_hint(&self) -> usize { true.size_hint() @@ -373,12 +373,15 @@ pub(crate) enum PvfCheckCause { Onboarding(ParaId), /// PVF vote was initiated by signalling of an upgrade by the given para. Upgrade { - /// The ID of the parachain that initiated or is waiting for the conclusion of pre-checking. + /// The ID of the parachain that initiated or is waiting for the conclusion of + /// pre-checking. id: ParaId, - /// The relay-chain block number of **inclusion** of candidate that that initiated the upgrade. + /// The relay-chain block number of **inclusion** of candidate that that initiated the + /// upgrade. 
/// - /// It's important to count upgrade enactment delay from the inclusion of this candidate instead - /// of its relay parent -- in order to keep PVF available in case of chain reversions. + /// It's important to count upgrade enactment delay from the inclusion of this candidate + /// instead of its relay parent -- in order to keep PVF available in case of chain + /// reversions. /// /// See https://github.com/paritytech/polkadot/issues/4601 for detailed explanation. included_at: BlockNumber, @@ -681,11 +684,11 @@ pub mod pallet { pub(super) type PastCodeMeta = StorageMap<_, Twox64Concat, ParaId, ParaPastCodeMeta>, ValueQuery>; - /// Which paras have past code that needs pruning and the relay-chain block at which the code was replaced. - /// Note that this is the actual height of the included block, not the expected height at which the - /// code upgrade would be applied, although they may be equal. - /// This is to ensure the entire acceptance period is covered, not an offset acceptance period starting - /// from the time at which the parachain perceives a code upgrade as having occurred. + /// Which paras have past code that needs pruning and the relay-chain block at which the code + /// was replaced. Note that this is the actual height of the included block, not the expected + /// height at which the code upgrade would be applied, although they may be equal. + /// This is to ensure the entire acceptance period is covered, not an offset acceptance period + /// starting from the time at which the parachain perceives a code upgrade as having occurred. /// Multiple entries for a single para are permitted. Ordered ascending by block number. #[pallet::storage] pub(super) type PastCodePruning = @@ -706,12 +709,13 @@ pub mod pallet { pub(super) type FutureCodeHash = StorageMap<_, Twox64Concat, ParaId, ValidationCodeHash>; - /// This is used by the relay-chain to communicate to a parachain a go-ahead with in the upgrade procedure. 
+ /// This is used by the relay-chain to communicate to a parachain a go-ahead with in the upgrade + /// procedure. /// /// This value is absent when there are no upgrades scheduled or during the time the relay chain - /// performs the checks. It is set at the first relay-chain block when the corresponding parachain - /// can switch its upgrade function. As soon as the parachain's block is included, the value - /// gets reset to `None`. + /// performs the checks. It is set at the first relay-chain block when the corresponding + /// parachain can switch its upgrade function. As soon as the parachain's block is included, the + /// value gets reset to `None`. /// /// NOTE that this field is used by parachains via merkle storage proofs, therefore changing /// the format will require migration of parachains. @@ -896,8 +900,9 @@ pub mod pallet { /// Otherwise, the code will be added into the storage. Note that the code will be added /// into storage with reference count 0. This is to account the fact that there are no users /// for this code yet. The caller will have to make sure that this code eventually gets - /// used by some parachain or removed from the storage to avoid storage leaks. For the latter - /// prefer to use the `poke_unused_validation_code` dispatchable to raw storage manipulation. + /// used by some parachain or removed from the storage to avoid storage leaks. For the + /// latter prefer to use the `poke_unused_validation_code` dispatchable to raw storage + /// manipulation. /// /// This function is mainly meant to be used for upgrading parachains that do not follow /// the go-ahead signal while the PVF pre-checking feature is enabled. @@ -1569,10 +1574,11 @@ impl Pallet { match cause { PvfCheckCause::Onboarding(id) => { - // Here we need to undo everything that was done during `schedule_para_initialize`. 
- // Essentially, the logic is similar to offboarding, with exception that before - // actual onboarding the parachain did not have a chance to reach to upgrades. - // Therefore we can skip all the upgrade related storage items here. + // Here we need to undo everything that was done during + // `schedule_para_initialize`. Essentially, the logic is similar to offboarding, + // with exception that before actual onboarding the parachain did not have a + // chance to reach to upgrades. Therefore we can skip all the upgrade related + // storage items here. weight += T::DbWeight::get().writes(3); UpcomingParasGenesis::::remove(&id); CurrentCodeHash::::remove(&id); @@ -1629,8 +1635,8 @@ impl Pallet { // // - Doing it within the context of the PR that introduces this change is undesirable, since // it is already a big change, and that change would require a migration. Moreover, if we - // run the new version of the runtime, there will be less things to worry about during - // the eventual proper migration. + // run the new version of the runtime, there will be less things to worry about during the + // eventual proper migration. // // - This data type already is used for generating genesis, and changing it will probably // introduce some unnecessary burden. @@ -1641,8 +1647,8 @@ impl Pallet { // get rid of hashing of the validation code when onboarding. // // - Replace `validation_code` with a sentinel value: an empty vector. This should be fine - // as long we do not allow registering parachains with empty code. At the moment of writing - // this should already be the case. + // as long we do not allow registering parachains with empty code. At the moment of + // writing this should already be the case. // // - Empty value is treated as the current code is already inserted during the onboarding. // @@ -1670,7 +1676,8 @@ impl Pallet { /// /// Will return error if either is true: /// - /// - para is not a stable parachain or parathread (i.e. 
[`ParaLifecycle::is_stable`] is `false`) + /// - para is not a stable parachain or parathread (i.e. [`ParaLifecycle::is_stable`] is + /// `false`) /// - para has a pending upgrade. /// - para has unprocessed messages in its UMP queue. /// @@ -1683,7 +1690,8 @@ impl Pallet { // ongoing PVF pre-checking votes. It also removes some nasty edge cases. // // However, an upcoming upgrade on its own imposes no restrictions. An upgrade is enacted - // with a new para head, so if a para never progresses we still should be able to offboard it. + // with a new para head, so if a para never progresses we still should be able to offboard + // it. // // This implicitly assumes that the given para exists, i.e. it's lifecycle != None. if let Some(future_code_hash) = FutureCodeHash::::get(&id) { @@ -1768,13 +1776,14 @@ impl Pallet { /// the relay-chain block number will be determined at which the upgrade will take place. We /// call that block `expected_at`. /// - /// Once the candidate with the relay-parent >= `expected_at` is enacted, the new validation code - /// will be applied. Therefore, the new code will be used to validate the next candidate. + /// Once the candidate with the relay-parent >= `expected_at` is enacted, the new validation + /// code will be applied. Therefore, the new code will be used to validate the next candidate. /// /// The new code should not be equal to the current one, otherwise the upgrade will be aborted. /// If there is already a scheduled code upgrade for the para, this is a no-op. /// - /// Inclusion block number specifies relay parent which enacted candidate initiating the upgrade. + /// Inclusion block number specifies relay parent which enacted candidate initiating the + /// upgrade. pub(crate) fn schedule_code_upgrade( id: ParaId, new_code: ValidationCode, @@ -1905,8 +1914,8 @@ impl Pallet { // We increase the code RC here in any case. Intuitively the parachain that requested this // action is now a user of that PVF. 
// - // If the result of the pre-checking is reject, then we would decrease the RC for each cause, - // including the current. + // If the result of the pre-checking is reject, then we would decrease the RC for each + // cause, including the current. // // If the result of the pre-checking is accept, then we do nothing to the RC because the PVF // will continue be used by the same users. @@ -1918,9 +1927,9 @@ impl Pallet { weight } - /// Note that a para has progressed to a new head, where the new head was executed in the context - /// of a relay-chain block with given number. This will apply pending code upgrades based - /// on the relay-parent block number provided. + /// Note that a para has progressed to a new head, where the new head was executed in the + /// context of a relay-chain block with given number. This will apply pending code upgrades + /// based on the relay-parent block number provided. pub(crate) fn note_new_head( id: ParaId, new_head: HeadData, diff --git a/runtime/parachains/src/paras/tests.rs b/runtime/parachains/src/paras/tests.rs index 2bf30bb273e5..4a3be6d7d50e 100644 --- a/runtime/parachains/src/paras/tests.rs +++ b/runtime/parachains/src/paras/tests.rs @@ -649,7 +649,8 @@ fn submit_code_change_when_not_allowed_is_err() { Paras::schedule_code_upgrade(para_id, newer_code.clone(), 2, &Configuration::config()); assert_eq!( FutureCodeUpgrades::::get(¶_id), - Some(1 + validation_upgrade_delay), // did not change since the same assertion from the last time. + Some(1 + validation_upgrade_delay), /* did not change since the same assertion from + * the last time. 
*/ ); assert_eq!(FutureCodeHash::::get(¶_id), Some(new_code.hash())); check_code_is_not_stored(&newer_code); @@ -1554,8 +1555,9 @@ fn increase_code_ref_doesnt_have_allergy_on_add_trusted_validation_code() { #[test] fn add_trusted_validation_code_insta_approval() { - // In particular, this tests that `kick_off_pvf_check` reacts to the `add_trusted_validation_code` - // and uses the `CodeByHash::contains_key` which is what `add_trusted_validation_code` uses. + // In particular, this tests that `kick_off_pvf_check` reacts to the + // `add_trusted_validation_code` and uses the `CodeByHash::contains_key` which is what + // `add_trusted_validation_code` uses. let para_id = 100.into(); let validation_code = ValidationCode(vec![1, 2, 3]); let validation_upgrade_delay = 25; diff --git a/runtime/parachains/src/paras_inherent/mod.rs b/runtime/parachains/src/paras_inherent/mod.rs index 61be0d4adae8..a40a3422a669 100644 --- a/runtime/parachains/src/paras_inherent/mod.rs +++ b/runtime/parachains/src/paras_inherent/mod.rs @@ -285,8 +285,9 @@ pub mod pallet { } impl Pallet { - /// Create the `ParachainsInherentData` that gets passed to [`Self::enter`] in [`Self::create_inherent`]. - /// This code is pulled out of [`Self::create_inherent`] so it can be unit tested. + /// Create the `ParachainsInherentData` that gets passed to [`Self::enter`] in + /// [`Self::create_inherent`]. This code is pulled out of [`Self::create_inherent`] so it can be + /// unit tested. fn create_inherent_inner(data: &InherentData) -> Option>> { let parachains_inherent_data = match data.get_data(&Self::INHERENT_IDENTIFIER) { Ok(Some(d)) => d, @@ -313,11 +314,11 @@ impl Pallet { /// The given inherent data is processed and state is altered accordingly. If any data could /// not be applied (inconsitencies, weight limit, ...) it is removed. 
/// - /// When called from `create_inherent` the `context` must be set to `ProcessInherentDataContext::ProvideInherent` - /// so it guarantees the invariant that inherent is not overweight. - /// - /// It is **mandatory** that calls from `enter` set `context` to `ProcessInherentDataContext::Enter` to ensure - /// the weight invariant is checked. + /// When called from `create_inherent` the `context` must be set to + /// `ProcessInherentDataContext::ProvideInherent` so it guarantees the invariant that inherent + /// is not overweight. + /// It is **mandatory** that calls from `enter` set `context` to + /// `ProcessInherentDataContext::Enter` to ensure the weight invariant is checked. /// /// Returns: Result containing processed inherent data and weight, the processed inherent would /// consume. @@ -379,8 +380,8 @@ impl Pallet { let dispatch_class = DispatchClass::Mandatory; let max_block_weight_full = ::BlockWeights::get(); log::debug!(target: LOG_TARGET, "Max block weight: {}", max_block_weight_full.max_block); - // Get max block weight for the mandatory class if defined, otherwise total max weight of - // the block. + // Get max block weight for the mandatory class if defined, otherwise total max weight + // of the block. let max_weight = max_block_weight_full .per_class .get(dispatch_class) @@ -412,7 +413,8 @@ impl Pallet { T::DisputesHandler::filter_dispute_data(set, post_conclusion_acceptance_period) }; - // Limit the disputes first, since the following statements depend on the votes include here. + // Limit the disputes first, since the following statements depend on the votes include + // here. let (checked_disputes_sets, checked_disputes_sets_consumed_weight) = limit_and_sanitize_disputes::( disputes, @@ -449,8 +451,8 @@ impl Pallet { } all_weight_after } else { - // This check is performed in the context of block execution. Ensures inherent weight invariants guaranteed - // by `create_inherent_data` for block authorship. 
+ // This check is performed in the context of block execution. Ensures inherent weight + // invariants guaranteed by `create_inherent_data` for block authorship. if all_weight_before.any_gt(max_block_weight) { log::error!( "Overweight para inherent data reached the runtime {:?}: {} > {}", @@ -714,13 +716,14 @@ fn random_sel Weight>( /// If there is sufficient space, all bitfields and all candidates /// will be included. /// -/// Otherwise tries to include all disputes, and then tries to fill the remaining space with bitfields and then candidates. +/// Otherwise tries to include all disputes, and then tries to fill the remaining space with +/// bitfields and then candidates. /// -/// The selection process is random. For candidates, there is an exception for code upgrades as they are preferred. -/// And for disputes, local and older disputes are preferred (see `limit_and_sanitize_disputes`). -/// for backed candidates, since with a increasing number of parachains their chances of -/// inclusion become slim. All backed candidates are checked beforehands in `fn create_inherent_inner` -/// which guarantees sanity. +/// The selection process is random. For candidates, there is an exception for code upgrades as they +/// are preferred. And for disputes, local and older disputes are preferred (see +/// `limit_and_sanitize_disputes`). for backed candidates, since with a increasing number of +/// parachains their chances of inclusion become slim. All backed candidates are checked +/// beforehands in `fn create_inherent_inner` which guarantees sanity. /// /// Assumes disputes are already filtered by the time this is called. /// @@ -977,7 +980,8 @@ fn compute_entropy(parent_hash: T::Hash) -> [u8; 32] { /// 1. If weight is exceeded by locals, pick the older ones (lower indices) /// until the weight limit is reached. /// -/// Returns the consumed weight amount, that is guaranteed to be less than the provided `max_consumable_weight`. 
+/// Returns the consumed weight amount, that is guaranteed to be less than the provided +/// `max_consumable_weight`. fn limit_and_sanitize_disputes< T: Config, CheckValidityFn: FnMut(DisputeStatementSet) -> Option, diff --git a/runtime/parachains/src/paras_inherent/tests.rs b/runtime/parachains/src/paras_inherent/tests.rs index c2e80e7525fb..faf52b555ba3 100644 --- a/runtime/parachains/src/paras_inherent/tests.rs +++ b/runtime/parachains/src/paras_inherent/tests.rs @@ -68,9 +68,9 @@ mod enter { } #[test] - // Validate that if we create 2 backed candidates which are assigned to 2 cores that will be freed via - // becoming fully available, the backed candidates will not be filtered out in `create_inherent` and - // will not cause `enter` to early. + // Validate that if we create 2 backed candidates which are assigned to 2 cores that will be + // freed via becoming fully available, the backed candidates will not be filtered out in + // `create_inherent` and will not cause `enter` to early. 
fn include_backed_candidates() { new_test_ext(MockGenesisConfig::default()).execute_with(|| { let dispute_statements = BTreeMap::new(); @@ -252,7 +252,8 @@ mod enter { let expected_para_inherent_data = scenario.data.clone(); // Check the para inherent data is as expected: - // * 1 bitfield per validator (5 validators per core, 3 disputes => 3 cores, 15 validators) + // * 1 bitfield per validator (5 validators per core, 3 disputes => 3 cores, 15 + // validators) assert_eq!(expected_para_inherent_data.bitfields.len(), 15); // * 0 backed candidate per core assert_eq!(expected_para_inherent_data.backed_candidates.len(), 0); @@ -389,7 +390,8 @@ mod enter { let expected_para_inherent_data = scenario.data.clone(); // Check the para inherent data is as expected: - // * 1 bitfield per validator (4 validators per core, 2 backed candidates, 3 disputes => 4*5 = 20) + // * 1 bitfield per validator (4 validators per core, 2 backed candidates, 3 disputes => + // 4*5 = 20) assert_eq!(expected_para_inherent_data.bitfields.len(), 20); // * 2 backed candidates assert_eq!(expected_para_inherent_data.backed_candidates.len(), 2); @@ -408,7 +410,8 @@ mod enter { Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); assert!(limit_inherent_data != expected_para_inherent_data); - // Three disputes is over weight (see previous test), so we expect to only see 2 disputes + // Three disputes is over weight (see previous test), so we expect to only see 2 + // disputes assert_eq!(limit_inherent_data.disputes.len(), 2); // Ensure disputes are filtered as expected assert_eq!(limit_inherent_data.disputes[0].session, 1); @@ -418,7 +421,8 @@ mod enter { limit_inherent_data.bitfields.len(), expected_para_inherent_data.bitfields.len() ); - // Ensure that all backed candidates are filtered out as either would make the block over weight + // Ensure that all backed candidates are filtered out as either would make the block + // over weight assert_eq!(limit_inherent_data.backed_candidates.len(), 
0); assert_ok!(Pallet::::enter( @@ -470,7 +474,8 @@ mod enter { let expected_para_inherent_data = scenario.data.clone(); // Check the para inherent data is as expected: - // * 1 bitfield per validator (5 validators per core, 2 backed candidates, 3 disputes => 4*5 = 20), + // * 1 bitfield per validator (5 validators per core, 2 backed candidates, 3 disputes => + // 4*5 = 20), assert_eq!(expected_para_inherent_data.bitfields.len(), 25); // * 2 backed candidates, assert_eq!(expected_para_inherent_data.backed_candidates.len(), 2); @@ -493,14 +498,16 @@ mod enter { assert!(inherent_data_weight(&limit_inherent_data) .all_lte(max_block_weight_proof_size_adjusted())); - // Three disputes is over weight (see previous test), so we expect to only see 2 disputes + // Three disputes is over weight (see previous test), so we expect to only see 2 + // disputes assert_eq!(limit_inherent_data.disputes.len(), 2); // Ensure disputes are filtered as expected assert_eq!(limit_inherent_data.disputes[0].session, 1); assert_eq!(limit_inherent_data.disputes[1].session, 2); // Ensure all bitfields are included as these are still not over weight assert_eq!(limit_inherent_data.bitfields.len(), 20,); - // Ensure that all backed candidates are filtered out as either would make the block over weight + // Ensure that all backed candidates are filtered out as either would make the block + // over weight assert_eq!(limit_inherent_data.backed_candidates.len(), 0); assert_ok!(Pallet::::enter( @@ -551,7 +558,8 @@ mod enter { let expected_para_inherent_data = scenario.data.clone(); // Check the para inherent data is as expected: - // * 1 bitfield per validator (5 validators per core, 2 backed candidates, 3 disputes => 5*5 = 25) + // * 1 bitfield per validator (5 validators per core, 2 backed candidates, 3 disputes => + // 5*5 = 25) assert_eq!(expected_para_inherent_data.bitfields.len(), 25); // * 2 backed candidates assert_eq!(expected_para_inherent_data.backed_candidates.len(), 2); @@ -632,7 +640,8 @@ 
mod enter { .any_lt(inherent_data_weight(&expected_para_inherent_data))); // Check the para inherent data is as expected: - // * 1 bitfield per validator (5 validators per core, 2 backed candidates, 3 disputes => 5*5 = 25) + // * 1 bitfield per validator (5 validators per core, 2 backed candidates, 3 disputes => + // 5*5 = 25) assert_eq!(expected_para_inherent_data.bitfields.len(), 25); // * 2 backed candidates assert_eq!(expected_para_inherent_data.backed_candidates.len(), 2); @@ -645,7 +654,8 @@ mod enter { let limit_inherent_data = Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); - // Expect that inherent data is filtered to include only 1 backed candidate and 2 disputes + // Expect that inherent data is filtered to include only 1 backed candidate and 2 + // disputes assert!(limit_inherent_data != expected_para_inherent_data); assert!( max_block_weight_proof_size_adjusted() @@ -727,7 +737,8 @@ mod enter { .unwrap(); let limit_inherent_data = Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); - // Expect that inherent data is filtered to include only 1 backed candidate and 2 disputes + // Expect that inherent data is filtered to include only 1 backed candidate and 2 + // disputes assert!(limit_inherent_data != expected_para_inherent_data); assert!( max_block_weight_proof_size_adjusted() @@ -792,7 +803,8 @@ mod enter { let limit_inherent_data = Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); - // Expect that inherent data is filtered to include only 1 backed candidate and 2 disputes + // Expect that inherent data is filtered to include only 1 backed candidate and 2 + // disputes assert!(limit_inherent_data != expected_para_inherent_data); assert!( max_block_weight_proof_size_adjusted() @@ -841,7 +853,8 @@ mod enter { .any_lt(inherent_data_weight(&expected_para_inherent_data))); // Check the para inherent data is as expected: - // * 1 bitfield per validator (5 validators per core, 2 backed candidates, 0 disputes => 
2*5 = 10) + // * 1 bitfield per validator (5 validators per core, 2 backed candidates, 0 disputes => + // 2*5 = 10) assert_eq!(expected_para_inherent_data.bitfields.len(), 10); // * 2 backed candidates assert_eq!(expected_para_inherent_data.backed_candidates.len(), 2); @@ -854,7 +867,8 @@ mod enter { let limit_inherent_data = Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); - // Expect that inherent data is filtered to include only 1 backed candidate and 2 disputes + // Expect that inherent data is filtered to include only 1 backed candidate and 2 + // disputes assert!(limit_inherent_data != expected_para_inherent_data); assert!( max_block_weight_proof_size_adjusted() @@ -903,7 +917,8 @@ mod enter { .any_lt(inherent_data_weight(&expected_para_inherent_data))); // Check the para inherent data is as expected: - // * 1 bitfield per validator (5 validators per core, 30 backed candidates, 3 disputes => 5*33 = 165) + // * 1 bitfield per validator (5 validators per core, 30 backed candidates, 3 disputes + // => 5*33 = 165) assert_eq!(expected_para_inherent_data.bitfields.len(), 165); // * 30 backed candidates assert_eq!(expected_para_inherent_data.backed_candidates.len(), 30); diff --git a/runtime/parachains/src/runtime_api_impl/v5.rs b/runtime/parachains/src/runtime_api_impl/v5.rs index 1257c0c91702..4c9c8c911f62 100644 --- a/runtime/parachains/src/runtime_api_impl/v5.rs +++ b/runtime/parachains/src/runtime_api_impl/v5.rs @@ -393,7 +393,8 @@ pub fn pvfs_require_precheck() -> Vec { >::pvfs_require_precheck() } -/// Returns the validation code hash for the given parachain making the given `OccupiedCoreAssumption`. +/// Returns the validation code hash for the given parachain making the given +/// `OccupiedCoreAssumption`. 
pub fn validation_code_hash( para_id: ParaId, assumption: OccupiedCoreAssumption, diff --git a/runtime/parachains/src/scheduler.rs b/runtime/parachains/src/scheduler.rs index b69c16ae8d01..6882834187dc 100644 --- a/runtime/parachains/src/scheduler.rs +++ b/runtime/parachains/src/scheduler.rs @@ -21,19 +21,20 @@ //! - Scheduling parachains and parathreads //! //! It aims to achieve these tasks with these goals in mind: -//! - It should be possible to know at least a block ahead-of-time, ideally more, -//! which validators are going to be assigned to which parachains. -//! - Parachains that have a candidate pending availability in this fork of the chain -//! should not be assigned. +//! - It should be possible to know at least a block ahead-of-time, ideally more, which validators +//! are going to be assigned to which parachains. +//! - Parachains that have a candidate pending availability in this fork of the chain should not be +//! assigned. //! - Validator assignments should not be gameable. Malicious cartels should not be able to //! manipulate the scheduler to assign themselves as desired. -//! - High or close to optimal throughput of parachains and parathreads. Work among validator groups should be balanced. +//! - High or close to optimal throughput of parachains and parathreads. Work among validator groups +//! should be balanced. //! //! The Scheduler manages resource allocation using the concept of "Availability Cores". //! There will be one availability core for each parachain, and a fixed number of cores //! used for multiplexing parathreads. Validators will be partitioned into groups, with the same -//! number of groups as availability cores. Validator groups will be assigned to different availability cores -//! over time. +//! number of groups as availability cores. Validator groups will be assigned to different +//! availability cores over time. 
use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::BlockNumberFor; @@ -169,8 +170,9 @@ pub mod pallet { /// broader set of Polkadot validators, but instead just the subset used for parachains during /// this session. /// - /// Bound: The number of cores is the sum of the numbers of parachains and parathread multiplexers. - /// Reasonably, 100-1000. The dominant factor is the number of validators: safe upper bound at 10k. + /// Bound: The number of cores is the sum of the numbers of parachains and parathread + /// multiplexers. Reasonably, 100-1000. The dominant factor is the number of validators: safe + /// upper bound at 10k. #[pallet::storage] #[pallet::getter(fn validator_groups)] pub(crate) type ValidatorGroups = StorageValue<_, Vec>, ValueQuery>; @@ -182,8 +184,8 @@ pub mod pallet { #[pallet::storage] pub(crate) type ParathreadQueue = StorageValue<_, ParathreadClaimQueue, ValueQuery>; - /// One entry for each availability core. Entries are `None` if the core is not currently occupied. Can be - /// temporarily `Some` if scheduled but not occupied. + /// One entry for each availability core. Entries are `None` if the core is not currently + /// occupied. Can be temporarily `Some` if scheduled but not occupied. /// The i'th parachain belongs to the i'th core, with the remaining cores all being /// parathread-multiplexers. /// @@ -197,11 +199,13 @@ pub mod pallet { /// An index used to ensure that only one claim on a parathread exists in the queue or is /// currently being handled by an occupied core. /// - /// Bounded by the number of parathread cores and scheduling lookahead. Reasonably, 10 * 50 = 500. + /// Bounded by the number of parathread cores and scheduling lookahead. Reasonably, 10 * 50 = + /// 500. #[pallet::storage] pub(crate) type ParathreadClaimIndex = StorageValue<_, Vec, ValueQuery>; - /// The block number where the session start occurred. Used to track how many group rotations have occurred. 
+ /// The block number where the session start occurred. Used to track how many group rotations + /// have occurred. /// /// Note that in the context of parachains modules the session change is signaled during /// the block and enacted at the end of the block (at the finalization stage, to be exact). @@ -215,8 +219,8 @@ pub mod pallet { /// /// Bounded by the number of cores: one for each parachain and parathread multiplexer. /// - /// The value contained here will not be valid after the end of a block. Runtime APIs should be used to determine scheduled cores/ - /// for the upcoming block. + /// The value contained here will not be valid after the end of a block. Runtime APIs should be + /// used to determine scheduled cores/ for the upcoming block. #[pallet::storage] #[pallet::getter(fn scheduled)] pub(crate) type Scheduled = StorageValue<_, Vec, ValueQuery>; @@ -380,8 +384,9 @@ impl Pallet { }) } - /// Free unassigned cores. Provide a list of cores that should be considered newly-freed along with the reason - /// for them being freed. The list is assumed to be sorted in ascending order by core index. + /// Free unassigned cores. Provide a list of cores that should be considered newly-freed along + /// with the reason for them being freed. The list is assumed to be sorted in ascending order by + /// core index. pub(crate) fn free_cores(just_freed_cores: impl IntoIterator) { let config = >::config(); @@ -403,8 +408,8 @@ impl Pallet { }) }, FreedReason::TimedOut => { - // If a parathread candidate times out, it's not the collator's fault, - // so we don't increment retries. + // If a parathread candidate times out, it's not the collator's + // fault, so we don't increment retries. ParathreadQueue::::mutate(|queue| { queue.enqueue_entry(entry, config.parathread_cores); }) @@ -417,9 +422,9 @@ impl Pallet { }) } - /// Schedule all unassigned cores, where possible. 
Provide a list of cores that should be considered - /// newly-freed along with the reason for them being freed. The list is assumed to be sorted in - /// ascending order by core index. + /// Schedule all unassigned cores, where possible. Provide a list of cores that should be + /// considered newly-freed along with the reason for them being freed. The list is assumed to be + /// sorted in ascending order by core index. pub(crate) fn schedule( just_freed_cores: impl IntoIterator, now: BlockNumberFor, @@ -455,10 +460,10 @@ impl Pallet { // check the first entry already scheduled with core index >= than the one we // are looking at. 3 cases: - // 1. No such entry, clearly this core is not scheduled, so we need to schedule and put at the end. - // 2. Entry exists and has same index as the core we are inspecting. do not schedule again. - // 3. Entry exists and has higher index than the core we are inspecting. schedule and note - // insertion position. + // 1. No such entry, clearly this core is not scheduled, so we need to schedule + // and put at the end. 2. Entry exists and has same index as the core we are + // inspecting. do not schedule again. 3. Entry exists and has higher index than + // the core we are inspecting. schedule and note insertion position. prev_scheduled_in_order.peek().map_or( Some(scheduled.len()), |(idx_in_scheduled, assign)| { @@ -509,8 +514,9 @@ impl Pallet { } } - // at this point, because `Scheduled` is guaranteed to be sorted and we navigated unassigned - // core indices in ascending order, we can enact the updates prepared by the previous actions. + // at this point, because `Scheduled` is guaranteed to be sorted and we navigated + // unassigned core indices in ascending order, we can enact the updates prepared by the + // previous actions. // // while inserting, we have to account for the amount of insertions already done. 
// @@ -522,20 +528,20 @@ impl Pallet { scheduled.insert(insert_at, to_insert); } - // scheduled is guaranteed to be sorted after this point because it was sorted before, and we - // applied sorted updates at their correct positions, accounting for the offsets of previous - // insertions. + // scheduled is guaranteed to be sorted after this point because it was sorted before, + // and we applied sorted updates at their correct positions, accounting for the offsets + // of previous insertions. } Scheduled::::set(scheduled); ParathreadQueue::::set(parathread_queue); } - /// Note that the given cores have become occupied. Behavior undefined if any of the given cores were not scheduled - /// or the slice is not sorted ascending by core index. + /// Note that the given cores have become occupied. Behavior undefined if any of the given cores + /// were not scheduled or the slice is not sorted ascending by core index. /// - /// Complexity: O(n) in the number of scheduled cores, which is capped at the number of total cores. - /// This is efficient in the case that most scheduled cores are occupied. + /// Complexity: O(n) in the number of scheduled cores, which is capped at the number of total + /// cores. This is efficient in the case that most scheduled cores are occupied. pub(crate) fn occupied(now_occupied: &[CoreIndex]) { if now_occupied.is_empty() { return @@ -568,8 +574,8 @@ impl Pallet { AvailabilityCores::::set(availability_cores); } - /// Get the para (chain or thread) ID assigned to a particular core or index, if any. Core indices - /// out of bounds will return `None`, as will indices of unassigned cores. + /// Get the para (chain or thread) ID assigned to a particular core or index, if any. Core + /// indices out of bounds will return `None`, as will indices of unassigned cores. 
pub(crate) fn core_para(core_index: CoreIndex) -> Option { let cores = AvailabilityCores::::get(); match cores.get(core_index.0 as usize).and_then(|c| c.as_ref()) { @@ -587,8 +593,9 @@ impl Pallet { ValidatorGroups::::get().get(group_index.0 as usize).map(|g| g.clone()) } - /// Get the group assigned to a specific core by index at the current block number. Result undefined if the core index is unknown - /// or the block number is less than the session start index. + /// Get the group assigned to a specific core by index at the current block number. Result + /// undefined if the core index is unknown or the block number is less than the session start + /// index. pub(crate) fn group_assigned_to_core( core: CoreIndex, at: BlockNumberFor, @@ -622,10 +629,11 @@ impl Pallet { /// Returns an optional predicate that should be used for timing out occupied cores. /// - /// If `None`, no timing-out should be done. The predicate accepts the index of the core, and the - /// block number since which it has been occupied, and the respective parachain and parathread - /// timeouts, i.e. only within `max(config.chain_availability_period, config.thread_availability_period)` - /// of the last rotation would this return `Some`, unless there are no rotations. + /// If `None`, no timing-out should be done. The predicate accepts the index of the core, and + /// the block number since which it has been occupied, and the respective parachain and + /// parathread timeouts, i.e. only within `max(config.chain_availability_period, + /// config.thread_availability_period)` of the last rotation would this return `Some`, unless + /// there are no rotations. 
/// /// This really should not be a box, but is working around a compiler limitation filed here: /// https://github.com/rust-lang/rust/issues/73226 diff --git a/runtime/parachains/src/scheduler/tests.rs b/runtime/parachains/src/scheduler/tests.rs index 2188bb15b2e5..c4830f4bf253 100644 --- a/runtime/parachains/src/scheduler/tests.rs +++ b/runtime/parachains/src/scheduler/tests.rs @@ -56,7 +56,8 @@ fn run_to_block( if let Some(notification) = new_session(b + 1) { let mut notification_with_session_index = notification; - // We will make every session change trigger an action queue. Normally this may require 2 or more session changes. + // We will make every session change trigger an action queue. Normally this may require + // 2 or more session changes. if notification_with_session_index.session_index == SessionIndex::default() { notification_with_session_index.session_index = ParasShared::scheduled_session(); } @@ -104,8 +105,9 @@ fn default_config() -> HostConfiguration { scheduling_lookahead: 2, parathread_retries: 1, // This field does not affect anything that scheduler does. However, `HostConfiguration` - // is still a subject to consistency test. It requires that `minimum_validation_upgrade_delay` - // is greater than `chain_availability_period` and `thread_availability_period`. + // is still a subject to consistency test. It requires that + // `minimum_validation_upgrade_delay` is greater than `chain_availability_period` and + // `thread_availability_period`. minimum_validation_upgrade_delay: 6, ..Default::default() } @@ -626,9 +628,9 @@ fn schedule_schedules_including_just_freed() { assert!(Scheduler::scheduled().is_empty()); } - // add a couple more parathread claims - the claim on `b` will go to the 3rd parathread core (4) - // and the claim on `d` will go back to the 1st parathread core (2). The claim on `e` then - // will go for core `3`. 
+ // add a couple more parathread claims - the claim on `b` will go to the 3rd parathread core + // (4) and the claim on `d` will go back to the 1st parathread core (2). The claim on `e` + // then will go for core `3`. Scheduler::add_parathread_claim(ParathreadClaim(thread_b, collator.clone())); Scheduler::add_parathread_claim(ParathreadClaim(thread_d, collator.clone())); Scheduler::add_parathread_claim(ParathreadClaim(thread_e, collator.clone())); diff --git a/runtime/parachains/src/shared.rs b/runtime/parachains/src/shared.rs index 857e671f0ee4..6b50bcce4054 100644 --- a/runtime/parachains/src/shared.rs +++ b/runtime/parachains/src/shared.rs @@ -62,8 +62,8 @@ pub mod pallet { pub(super) type ActiveValidatorIndices = StorageValue<_, Vec, ValueQuery>; - /// The parachain attestation keys of the validators actively participating in parachain consensus. - /// This should be the same length as `ActiveValidatorIndices`. + /// The parachain attestation keys of the validators actively participating in parachain + /// consensus. This should be the same length as `ActiveValidatorIndices`. #[pallet::storage] #[pallet::getter(fn active_validator_keys)] pub(super) type ActiveValidatorKeys = StorageValue<_, Vec, ValueQuery>; diff --git a/runtime/parachains/src/util.rs b/runtime/parachains/src/util.rs index d5b339b679e3..aa07ef080055 100644 --- a/runtime/parachains/src/util.rs +++ b/runtime/parachains/src/util.rs @@ -48,7 +48,7 @@ pub fn make_persisted_validation_data( /// the order of the `active` vec, the second item will contain the rest, in the original order. 
/// /// ```ignore -/// split_active_subset(active, all).0 == take_active_subset(active, all) +/// split_active_subset(active, all).0 == take_active_subset(active, all) /// ``` pub fn split_active_subset(active: &[ValidatorIndex], all: &[T]) -> (Vec, Vec) { let active_set: BTreeSet<_> = active.iter().cloned().collect(); @@ -76,7 +76,7 @@ pub fn split_active_subset(active: &[ValidatorIndex], all: &[T]) -> (V /// Uses `split_active_subset` and concatenates the inactive to the active vec. /// /// ```ignore -/// split_active_subset(active, all)[0..active.len()]) == take_active_subset(active, all) +/// split_active_subset(active, all)[0..active.len()]) == take_active_subset(active, all) /// ``` pub fn take_active_subset_and_inactive(active: &[ValidatorIndex], all: &[T]) -> Vec { let (mut a, mut i) = split_active_subset(active, all); diff --git a/runtime/polkadot/src/governance/old.rs b/runtime/polkadot/src/governance/old.rs index f4c2655a784a..4c7b503472f2 100644 --- a/runtime/polkadot/src/governance/old.rs +++ b/runtime/polkadot/src/governance/old.rs @@ -45,7 +45,8 @@ impl pallet_democracy::Config for Runtime { pallet_collective::EnsureProportionAtLeast, frame_system::EnsureRoot, >; - /// A 60% super-majority can have the next scheduled referendum be a straight majority-carries vote. + /// A 60% super-majority can have the next scheduled referendum be a straight majority-carries + /// vote. type ExternalMajorityOrigin = EitherOfDiverse< pallet_collective::EnsureProportionAtLeast, frame_system::EnsureRoot, diff --git a/runtime/polkadot/src/xcm_config.rs b/runtime/polkadot/src/xcm_config.rs index 867253ea0346..faae2e1d2619 100644 --- a/runtime/polkadot/src/xcm_config.rs +++ b/runtime/polkadot/src/xcm_config.rs @@ -63,8 +63,8 @@ parameter_types! 
{ pub LocalCheckAccount: (AccountId, MintLocation) = (CheckAccount::get(), MintLocation::Local); } -/// The canonical means of converting a `MultiLocation` into an `AccountId`, used when we want to determine -/// the sovereign account controlled by a location. +/// The canonical means of converting a `MultiLocation` into an `AccountId`, used when we want to +/// determine the sovereign account controlled by a location. pub type SovereignAccountOf = ( // We can convert a child parachain using the standard `AccountId` conversion. ChildParachainConvertsVia, @@ -72,8 +72,8 @@ pub type SovereignAccountOf = ( AccountId32Aliases, ); -/// Our asset transactor. This is what allows us to interact with the runtime assets from the point of -/// view of XCM-only concepts like `MultiLocation` and `MultiAsset`. +/// Our asset transactor. This is what allows us to interact with the runtime assets from the point +/// of view of XCM-only concepts like `MultiLocation` and `MultiAsset`. /// /// Ours is only aware of the Balances pallet, which is mapped to `TokenLocation`. pub type LocalAssetTransactor = XcmCurrencyAdapter< @@ -369,8 +369,8 @@ pub type CouncilToPlurality = BackingToPlurality< CouncilBodyId, >; -/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior location -/// of this chain. +/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior +/// location of this chain. pub type LocalOriginToLocation = ( CouncilToPlurality, // And a usual Signed origin to be used in XCM as a corresponding AccountId32 @@ -385,11 +385,11 @@ pub type StakingAdminToPlurality = pub type FellowshipAdminToPlurality = OriginToPluralityVoice; -/// Type to convert a pallet `Origin` type value into a `MultiLocation` value which represents an interior location -/// of this chain for a destination chain. 
+/// Type to convert a pallet `Origin` type value into a `MultiLocation` value which represents an +/// interior location of this chain for a destination chain. pub type LocalPalletOriginToLocation = ( - // We allow an origin from the Collective pallet to be used in XCM as a corresponding Plurality of the - // `Unit` body. + // We allow an origin from the Collective pallet to be used in XCM as a corresponding Plurality + // of the `Unit` body. CouncilToPlurality, // StakingAdmin origin to be used in XCM as a corresponding Plurality `MultiLocation` value. StakingAdminToPlurality, @@ -399,7 +399,8 @@ pub type LocalPalletOriginToLocation = ( impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; - // We only allow the root, the council, the fellowship admin and the staking admin to send messages. + // We only allow the root, the council, the fellowship admin and the staking admin to send + // messages. type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; type XcmRouter = XcmRouter; // Anyone can execute XCM messages locally... diff --git a/runtime/rococo/src/xcm_config.rs b/runtime/rococo/src/xcm_config.rs index 714a4f69e759..75e06391c56b 100644 --- a/runtime/rococo/src/xcm_config.rs +++ b/runtime/rococo/src/xcm_config.rs @@ -56,8 +56,8 @@ parameter_types! { pub type LocationConverter = (ChildParachainConvertsVia, AccountId32Aliases); -/// Our asset transactor. This is what allows us to interest with the runtime facilities from the point of -/// view of XCM-only concepts like `MultiLocation` and `MultiAsset`. +/// Our asset transactor. This is what allows us to interest with the runtime facilities from the +/// point of view of XCM-only concepts like `MultiLocation` and `MultiAsset`. /// /// Ours is only aware of the Balances pallet, which is mapped to `RocLocation`. 
pub type LocalAssetTransactor = XcmCurrencyAdapter< @@ -342,11 +342,11 @@ pub type CouncilToPlurality = BackingToPlurality< CouncilBodyId, >; -/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior location -/// of this chain. +/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior +/// location of this chain. pub type LocalOriginToLocation = ( - // We allow an origin from the Collective pallet to be used in XCM as a corresponding Plurality of the - // `Unit` body. + // We allow an origin from the Collective pallet to be used in XCM as a corresponding Plurality + // of the `Unit` body. CouncilToPlurality, // And a usual Signed origin to be used in XCM as a corresponding AccountId32 SignedToAccountId32, diff --git a/runtime/test-runtime/src/lib.rs b/runtime/test-runtime/src/lib.rs index c9f3aa6cb203..d7594e67c12a 100644 --- a/runtime/test-runtime/src/lib.rs +++ b/runtime/test-runtime/src/lib.rs @@ -355,8 +355,8 @@ impl pallet_staking::Config for Runtime { type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = onchain::OnChainExecution; - // Use the nominator map to iter voter AND no-ops for all SortedListProvider hooks. The migration - // to bags-list is a no-op, but the storage version will be updated. + // Use the nominator map to iter voter AND no-ops for all SortedListProvider hooks. The + // migration to bags-list is a no-op, but the storage version will be updated. type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; type NominationsQuota = pallet_staking::FixedNominationsQuota; diff --git a/runtime/test-runtime/src/xcm_config.rs b/runtime/test-runtime/src/xcm_config.rs index 21ce8c877dc3..2113bbae66ad 100644 --- a/runtime/test-runtime/src/xcm_config.rs +++ b/runtime/test-runtime/src/xcm_config.rs @@ -38,8 +38,8 @@ parameter_types! 
{ pub const UniversalLocation: xcm::latest::InteriorMultiLocation = xcm::latest::Junctions::Here; } -/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior location -/// of this chain. +/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior +/// location of this chain. pub type LocalOriginToLocation = ( // And a usual Signed origin to be used in XCM as a corresponding AccountId32 SignedToAccountId32, diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs index 4b4659442cff..9bb5a6db613d 100644 --- a/runtime/westend/src/lib.rs +++ b/runtime/westend/src/lib.rs @@ -338,8 +338,8 @@ pub struct MaybeSignedPhase; impl Get for MaybeSignedPhase { fn get() -> u32 { - // 1 day = 4 eras -> 1 week = 28 eras. We want to disable signed phase once a week to test the fallback unsigned - // phase is able to compute elections on Westend. + // 1 day = 4 eras -> 1 week = 28 eras. We want to disable signed phase once a week to test + // the fallback unsigned phase is able to compute elections on Westend. if Staking::current_era().unwrap_or(1) % 28 == 0 { 0 } else { diff --git a/runtime/westend/src/xcm_config.rs b/runtime/westend/src/xcm_config.rs index d6a3feb3bc0f..a83c38c9f66f 100644 --- a/runtime/westend/src/xcm_config.rs +++ b/runtime/westend/src/xcm_config.rs @@ -271,8 +271,8 @@ impl xcm_executor::Config for XcmConfig { type Aliasers = Nothing; } -/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior location -/// of this chain. +/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior +/// location of this chain. 
pub type LocalOriginToLocation = ( // And a usual Signed origin to be used in XCM as a corresponding AccountId32 SignedToAccountId32, diff --git a/rustfmt.toml b/rustfmt.toml index 542c561edd42..e2c4a037f37f 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,12 +1,20 @@ # Basic +edition = "2021" hard_tabs = true max_width = 100 use_small_heuristics = "Max" + # Imports imports_granularity = "Crate" reorder_imports = true + # Consistency newline_style = "Unix" + +# Format comments +comment_width = 100 +wrap_comments = true + # Misc chain_width = 80 spaces_around_ranges = false @@ -18,7 +26,3 @@ match_block_trailing_comma = true trailing_comma = "Vertical" trailing_semicolon = false use_field_init_shorthand = true -ignore = [ - "bridges", -] -edition = "2021" diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index b45c4c1be890..ea629f189dc8 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -114,4 +114,5 @@ cargo-clippy: - .docker-env - .test-refs script: + - cargo version && cargo clippy --version - SKIP_WASM_BUILD=1 env -u RUSTFLAGS cargo clippy --locked --all-targets diff --git a/statement-table/src/generic.rs b/statement-table/src/generic.rs index fcd261b438b3..9aa445becce0 100644 --- a/statement-table/src/generic.rs +++ b/statement-table/src/generic.rs @@ -96,8 +96,8 @@ pub enum ValidityDoubleVote { } impl ValidityDoubleVote { - /// Deconstruct this misbehavior into two `(Statement, Signature)` pairs, erasing the information - /// about precisely what the problem was. + /// Deconstruct this misbehavior into two `(Statement, Signature)` pairs, erasing the + /// information about precisely what the problem was. 
pub fn deconstruct( self, ) -> ((Statement, Signature), (Statement, Signature)) @@ -124,8 +124,8 @@ pub enum DoubleSign { } impl DoubleSign { - /// Deconstruct this misbehavior into a statement with two signatures, erasing the information about - /// precisely where in the process the issue was detected. + /// Deconstruct this misbehavior into a statement with two signatures, erasing the information + /// about precisely where in the process the issue was detected. pub fn deconstruct(self) -> (Statement, Signature, Signature) { match self { Self::Seconded(candidate, a, b) => (Statement::Seconded(candidate), a, b), @@ -555,10 +555,11 @@ impl<'a, Ctx: Context> Iterator for DrainMisbehaviors<'a, Ctx> { type Item = (Ctx::AuthorityId, MisbehaviorFor); fn next(&mut self) -> Option { - // Note: this implementation will prematurely return `None` if `self.drain.next()` ever returns a - // tuple whose vector is empty. That will never currently happen, as the only modification - // to the backing map is currently via `drain` and `entry(...).or_default().push(...)`. - // However, future code changes might change that property. + // Note: this implementation will prematurely return `None` if `self.drain.next()` ever + // returns a tuple whose vector is empty. That will never currently happen, as the only + // modification to the backing map is currently via `drain` and + // `entry(...).or_default().push(...)`. However, future code changes might change that + // property. self.maybe_item().or_else(|| { self.in_progress = self.drain.next().map(Into::into); self.maybe_item() diff --git a/tests/common.rs b/tests/common.rs index 39b92732498f..940a0c6f18d0 100644 --- a/tests/common.rs +++ b/tests/common.rs @@ -76,7 +76,8 @@ async fn wait_n_finalized_blocks_from(n: usize, url: &str) { /// This is hack to get the actual binded sockaddr because /// polkadot assigns a random port if the specified port was already binded. 
/// -/// You must call `Command::new("cmd").stdout(process::Stdio::piped()).stderr(process::Stdio::piped())` +/// You must call +/// `Command::new("cmd").stdout(process::Stdio::piped()).stderr(process::Stdio::piped())` /// for this to work. pub fn find_ws_url_from_output(read: impl Read + Send) -> (String, String) { let mut data = String::new(); diff --git a/utils/staking-miner/src/opts.rs b/utils/staking-miner/src/opts.rs index 819511b55b18..ecffe4531014 100644 --- a/utils/staking-miner/src/opts.rs +++ b/utils/staking-miner/src/opts.rs @@ -58,8 +58,8 @@ pub(crate) enum Command { #[derive(Debug, Clone, Parser)] #[cfg_attr(test, derive(PartialEq))] pub(crate) struct MonitorConfig { - /// The path to a file containing the seed of the account. If the file is not found, the seed is - /// used as-is. + /// The path to a file containing the seed of the account. If the file is not found, the seed + /// is used as-is. /// /// Can also be provided via the `SEED` environment variable. /// @@ -88,9 +88,11 @@ pub(crate) struct MonitorConfig { /// /// `--submission-strategy always`: always submit. /// - /// `--submission-strategy "percent-better "`: submit if the submission is `n` percent better. + /// `--submission-strategy "percent-better "`: submit if the submission is `n` percent + /// better. /// - /// `--submission-strategy "no-worse-than "`: submit if submission is no more than `n` percent worse. + /// `--submission-strategy "no-worse-than "`: submit if submission is no more than + /// `n` percent worse. #[clap(long, default_value = "if-leading")] pub submission_strategy: SubmissionStrategy, @@ -100,8 +102,8 @@ pub(crate) struct MonitorConfig { /// a delay can be enforced to avoid submitting at /// "same time" and risk potential races with other miners. /// - /// When this is enabled and there are competing solutions, your solution might not be submitted - /// if the scores are equal. 
+ /// When this is enabled and there are competing solutions, your solution might not be + /// submitted if the scores are equal. #[arg(long, default_value_t = 0)] pub delay: usize, } @@ -109,8 +111,8 @@ pub(crate) struct MonitorConfig { #[derive(Debug, Clone, Parser)] #[cfg_attr(test, derive(PartialEq))] pub(crate) struct DryRunConfig { - /// The path to a file containing the seed of the account. If the file is not found, the seed is - /// used as-is. + /// The path to a file containing the seed of the account. If the file is not found, the seed + /// is used as-is. /// /// Can also be provided via the `SEED` environment variable. /// @@ -165,8 +167,8 @@ pub enum SubmissionStrategy { IfLeading, /// Submit if we are no worse than `Perbill` worse than the best. ClaimNoWorseThan(Perbill), - /// Submit if we are leading, or if the solution that's leading is more that the given `Perbill` - /// better than us. This helps detect obviously fake solutions and still combat them. + /// Submit if we are leading, or if the solution that's leading is more that the given + /// `Perbill` better than us. This helps detect obviously fake solutions and still combat them. ClaimBetterThan(Perbill), } @@ -189,8 +191,8 @@ pub(crate) enum Solver { /// * --submission-strategy if-leading: only submit if leading /// * --submission-strategy always: always submit /// * --submission-strategy "percent-better ": submit if submission is `n` percent better. -/// * --submission-strategy "no-worse-than": submit if submission is no more than `n` percent worse. -/// +/// * --submission-strategy "no-worse-than": submit if submission is no more than `n` +/// percent worse. 
impl FromStr for SubmissionStrategy { type Err = String; diff --git a/utils/staking-miner/src/rpc.rs b/utils/staking-miner/src/rpc.rs index a95e89191a49..2d25616e2a17 100644 --- a/utils/staking-miner/src/rpc.rs +++ b/utils/staking-miner/src/rpc.rs @@ -61,7 +61,8 @@ pub trait RpcApi { at: Option<&Hash>, ) -> RpcResult>; - /// Dry run an extrinsic at a given block. Return SCALE encoded [`sp_runtime::ApplyExtrinsicResult`]. + /// Dry run an extrinsic at a given block. Return SCALE encoded + /// [`sp_runtime::ApplyExtrinsicResult`]. #[method(name = "system_dryRun")] async fn dry_run(&self, extrinsic: &Bytes, at: Option) -> RpcResult; diff --git a/xcm/pallet-xcm-benchmarks/src/generic/mod.rs b/xcm/pallet-xcm-benchmarks/src/generic/mod.rs index e5fce008a0f2..195066ee5b48 100644 --- a/xcm/pallet-xcm-benchmarks/src/generic/mod.rs +++ b/xcm/pallet-xcm-benchmarks/src/generic/mod.rs @@ -52,7 +52,8 @@ pub mod pallet { /// If set to `Err`, benchmarks which rely on an `exchange_asset` will be skipped. fn worst_case_asset_exchange() -> Result<(MultiAssets, MultiAssets), BenchmarkError>; - /// A `(MultiLocation, Junction)` that is one of the `UniversalAliases` configured by the XCM executor. + /// A `(MultiLocation, Junction)` that is one of the `UniversalAliases` configured by the + /// XCM executor. /// /// If set to `Err`, benchmarks which rely on a universal alias will be skipped. fn universal_alias() -> Result<(MultiLocation, Junction), BenchmarkError>; @@ -75,13 +76,15 @@ pub mod pallet { /// Return an unlocker, owner and assets that can be locked and unlocked. fn unlockable_asset() -> Result<(MultiLocation, MultiLocation, MultiAsset), BenchmarkError>; - /// A `(MultiLocation, NetworkId, InteriorMultiLocation)` we can successfully export message to. + /// A `(MultiLocation, NetworkId, InteriorMultiLocation)` we can successfully export message + /// to. /// /// If set to `Err`, benchmarks which rely on `export_message` will be skipped. 
fn export_message_origin_and_destination( ) -> Result<(MultiLocation, NetworkId, InteriorMultiLocation), BenchmarkError>; - /// A `(MultiLocation, MultiLocation)` that is one of the `Aliasers` configured by the XCM executor. + /// A `(MultiLocation, MultiLocation)` that is one of the `Aliasers` configured by the XCM + /// executor. /// /// If set to `Err`, benchmarks which rely on a universal alias will be skipped. fn alias_origin() -> Result<(MultiLocation, MultiLocation), BenchmarkError>; diff --git a/xcm/pallet-xcm/src/lib.rs b/xcm/pallet-xcm/src/lib.rs index d52d5ba24271..aefcf30910ed 100644 --- a/xcm/pallet-xcm/src/lib.rs +++ b/xcm/pallet-xcm/src/lib.rs @@ -195,9 +195,9 @@ pub mod pallet { /// The type used to actually dispatch an XCM to its destination. type XcmRouter: SendXcm; - /// Required origin for executing XCM messages, including the teleport functionality. If successful, - /// then it resolves to `MultiLocation` which exists as an interior location within this chain's XCM - /// context. + /// Required origin for executing XCM messages, including the teleport functionality. If + /// successful, then it resolves to `MultiLocation` which exists as an interior location + /// within this chain's XCM context. type ExecuteXcmOrigin: EnsureOrigin< ::RuntimeOrigin, Success = MultiLocation, @@ -212,7 +212,8 @@ pub mod pallet { /// Our XCM filter which messages to be teleported using the dedicated extrinsic must pass. type XcmTeleportFilter: Contains<(MultiLocation, Vec)>; - /// Our XCM filter which messages to be reserve-transferred using the dedicated extrinsic must pass. + /// Our XCM filter which messages to be reserve-transferred using the dedicated extrinsic + /// must pass. type XcmReserveTransferFilter: Contains<(MultiLocation, Vec)>; /// Means of measuring the weight consumed by an XCM message locally. @@ -290,8 +291,8 @@ pub mod pallet { /// Query response has been received and query is removed. 
The registered notification has /// been dispatched and executed successfully. Notified { query_id: QueryId, pallet_index: u8, call_index: u8 }, - /// Query response has been received and query is removed. The registered notification could - /// not be dispatched because the dispatch weight is greater than the maximum weight + /// Query response has been received and query is removed. The registered notification + /// could not be dispatched because the dispatch weight is greater than the maximum weight /// originally budgeted by this runtime for the query result. NotifyOverweight { query_id: QueryId, @@ -371,7 +372,8 @@ pub mod pallet { cost: MultiAssets, message_id: XcmHash, }, - /// We have requested that a remote chain stops sending us XCM version change notifications. + /// We have requested that a remote chain stops sending us XCM version change + /// notifications. VersionNotifyUnrequested { destination: MultiLocation, cost: MultiAssets, @@ -402,8 +404,8 @@ pub mod pallet { /// The desired destination was unreachable, generally because there is a no way of routing /// to it. Unreachable, - /// There was some other issue (i.e. not to do with routing) in sending the message. Perhaps - /// a lack of space for buffering the message. + /// There was some other issue (i.e. not to do with routing) in sending the message. + /// Perhaps a lack of space for buffering the message. SendFailure, /// The message execution fails the filter. Filtered, @@ -791,12 +793,13 @@ pub mod pallet { /// with all fees taken as needed from the asset. /// /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. - /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send - /// from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain. - /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be - /// an `AccountId32` value. 
- /// - `assets`: The assets to be withdrawn. The first item should be the currency used to to pay the fee on the - /// `dest` side. May not be empty. + /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, + /// Parachain(..))` to send from parachain to parachain, or `X1(Parachain(..))` to send + /// from relay to parachain. + /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will + /// generally be an `AccountId32` value. + /// - `assets`: The assets to be withdrawn. The first item should be the currency used to to + /// pay the fee on the `dest` side. May not be empty. /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay /// fees. #[pallet::call_index(1)] @@ -839,12 +842,13 @@ pub mod pallet { /// with all fees taken as needed from the asset. /// /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. - /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send - /// from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain. - /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be - /// an `AccountId32` value. - /// - `assets`: The assets to be withdrawn. This should include the assets used to pay the fee on the - /// `dest` side. + /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, + /// Parachain(..))` to send from parachain to parachain, or `X1(Parachain(..))` to send + /// from relay to parachain. + /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will + /// generally be an `AccountId32` value. + /// - `assets`: The assets to be withdrawn. This should include the assets used to pay the + /// fee on the `dest` side. /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay /// fees. 
#[pallet::call_index(2)] @@ -885,12 +889,12 @@ pub mod pallet { /// An event is deposited indicating whether `msg` could be executed completely or only /// partially. /// - /// No more than `max_weight` will be used in its attempted execution. If this is less than the - /// maximum amount of weight that the message could take to be executed, then no execution - /// attempt will be made. + /// No more than `max_weight` will be used in its attempted execution. If this is less than + /// the maximum amount of weight that the message could take to be executed, then no + /// execution attempt will be made. /// - /// NOTE: A successful return to this does *not* imply that the `msg` was executed successfully - /// to completion; only that *some* of it was executed. + /// NOTE: A successful return to this does *not* imply that the `msg` was executed + /// successfully to completion; only that *some* of it was executed. #[pallet::call_index(3)] #[pallet::weight(max_weight.saturating_add(T::WeightInfo::execute()))] pub fn execute( @@ -1012,12 +1016,13 @@ pub mod pallet { /// at risk. /// /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. - /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send - /// from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain. - /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be - /// an `AccountId32` value. - /// - `assets`: The assets to be withdrawn. This should include the assets used to pay the fee on the - /// `dest` side. + /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, + /// Parachain(..))` to send from parachain to parachain, or `X1(Parachain(..))` to send + /// from relay to parachain. + /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will + /// generally be an `AccountId32` value. 
+ /// - `assets`: The assets to be withdrawn. This should include the assets used to pay the
+ /// fee on the `dest` side.
 /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay
 /// fees.
 /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase.
@@ -1063,12 +1068,13 @@ pub mod pallet {
 /// at risk.
 ///
 /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM.
- /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send
- /// from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain.
- /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be
- /// an `AccountId32` value.
- /// - `assets`: The assets to be withdrawn. The first item should be the currency used to to pay the fee on the
- /// `dest` side. May not be empty.
+ /// - `dest`: Destination context for the assets. Will typically be `X2(Parent,
+ /// Parachain(..))` to send from parachain to parachain, or `X1(Parachain(..))` to send
+ /// from relay to parachain.
+ /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will
+ /// generally be an `AccountId32` value.
+ /// - `assets`: The assets to be withdrawn. The first item should be the currency used to
+ /// pay the fee on the `dest` side. May not be empty.
 /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay
 /// fees.
 /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase.
@@ -1561,13 +1567,13 @@ impl Pallet {
 ///
 /// - `message`: The message whose outcome should be reported.
 /// - `responder`: The origin from which a response should be expected.
- /// - `notify`: A dispatchable function which will be called once the outcome of `message`
- /// is known.
It may be a dispatchable in any pallet of the local chain, but other than - /// the usual origin, it must accept exactly two arguments: `query_id: QueryId` and - /// `outcome: Response`, and in that order. It should expect that the origin is - /// `Origin::Response` and will contain the responder's location. - /// - `timeout`: The block number after which it is permissible for `notify` not to be - /// called even if a response is received. + /// - `notify`: A dispatchable function which will be called once the outcome of `message` is + /// known. It may be a dispatchable in any pallet of the local chain, but other than the usual + /// origin, it must accept exactly two arguments: `query_id: QueryId` and `outcome: Response`, + /// and in that order. It should expect that the origin is `Origin::Response` and will contain + /// the responder's location. + /// - `timeout`: The block number after which it is permissible for `notify` not to be called + /// even if a response is received. /// /// `report_outcome_notify` may return an error if the `responder` is not invertible. /// @@ -2090,8 +2096,8 @@ impl OnResponse for Pallet { call_index, }; Self::deposit_event(e); - // Not much to do with the result as it is. It's up to the parachain to ensure that the - // message makes sense. + // Not much to do with the result as it is. It's up to the + // parachain to ensure that the message makes sense. error_and_info.post_info.actual_weight }, } @@ -2159,8 +2165,8 @@ where } } -/// Filter for `MultiLocation` to find those which represent a strict majority approval of an identified -/// plurality. +/// Filter for `MultiLocation` to find those which represent a strict majority approval of an +/// identified plurality. /// /// May reasonably be used with `EnsureXcm`. 
pub struct IsMajorityOfBody(PhantomData<(Prefix, Body)>); @@ -2186,8 +2192,8 @@ impl, Body: Get> Contains } } -/// `EnsureOrigin` implementation succeeding with a `MultiLocation` value to recognize and filter the -/// `Origin::Xcm` item. +/// `EnsureOrigin` implementation succeeding with a `MultiLocation` value to recognize and filter +/// the `Origin::Xcm` item. pub struct EnsureXcm(PhantomData); impl, F: Contains> EnsureOrigin for EnsureXcm where diff --git a/xcm/src/double_encoded.rs b/xcm/src/double_encoded.rs index 2c8957d9ed76..c4c1276fad8d 100644 --- a/xcm/src/double_encoded.rs +++ b/xcm/src/double_encoded.rs @@ -73,7 +73,8 @@ impl DoubleEncoded { impl DoubleEncoded { /// Decode the inner encoded value and store it. - /// Returns a reference to the value in case of success and `Err(())` in case the decoding fails. + /// Returns a reference to the value in case of success and `Err(())` in case the decoding + /// fails. pub fn ensure_decoded(&mut self) -> Result<&T, ()> { if self.decoded.is_none() { self.decoded = @@ -92,8 +93,9 @@ impl DoubleEncoded { .ok_or(()) } - /// Provides an API similar to `TryInto` that allows fallible conversion to the inner value type. - /// `TryInto` implementation would collide with std blanket implementation based on `TryFrom`. + /// Provides an API similar to `TryInto` that allows fallible conversion to the inner value + /// type. `TryInto` implementation would collide with std blanket implementation based on + /// `TryFrom`. pub fn try_into(mut self) -> Result { self.ensure_decoded()?; self.decoded.ok_or(()) diff --git a/xcm/src/lib.rs b/xcm/src/lib.rs index 2e8ea78b5c15..a012c5f53fbf 100644 --- a/xcm/src/lib.rs +++ b/xcm/src/lib.rs @@ -360,7 +360,8 @@ impl TryFrom> for v3::Xcm { } } -/// Convert an `Xcm` datum into a `VersionedXcm`, based on a destination `MultiLocation` which will interpret it. +/// Convert an `Xcm` datum into a `VersionedXcm`, based on a destination `MultiLocation` which will +/// interpret it. 
pub trait WrapVersion { fn wrap_version( dest: &latest::MultiLocation, @@ -368,7 +369,8 @@ pub trait WrapVersion { ) -> Result, ()>; } -/// `()` implementation does nothing with the XCM, just sending with whatever version it was authored as. +/// `()` implementation does nothing with the XCM, just sending with whatever version it was +/// authored as. impl WrapVersion for () { fn wrap_version( _: &latest::MultiLocation, @@ -378,7 +380,8 @@ impl WrapVersion for () { } } -/// `WrapVersion` implementation which attempts to always convert the XCM to version 2 before wrapping it. +/// `WrapVersion` implementation which attempts to always convert the XCM to version 2 before +/// wrapping it. pub struct AlwaysV2; impl WrapVersion for AlwaysV2 { fn wrap_version( @@ -389,7 +392,8 @@ impl WrapVersion for AlwaysV2 { } } -/// `WrapVersion` implementation which attempts to always convert the XCM to version 3 before wrapping it. +/// `WrapVersion` implementation which attempts to always convert the XCM to version 3 before +/// wrapping it. pub struct AlwaysV3; impl WrapVersion for AlwaysV3 { fn wrap_version( diff --git a/xcm/src/v2/junction.rs b/xcm/src/v2/junction.rs index be075a31fe32..73a502999462 100644 --- a/xcm/src/v2/junction.rs +++ b/xcm/src/v2/junction.rs @@ -32,13 +32,13 @@ pub enum Junction { /// /// Generally used when the context is a Polkadot Relay-chain. Parachain(#[codec(compact)] u32), - /// A 32-byte identifier for an account of a specific network that is respected as a sovereign endpoint within - /// the context. + /// A 32-byte identifier for an account of a specific network that is respected as a sovereign + /// endpoint within the context. /// /// Generally used when the context is a Substrate-based chain. AccountId32 { network: NetworkId, id: [u8; 32] }, - /// An 8-byte index for an account of a specific network that is respected as a sovereign endpoint within - /// the context. 
+ /// An 8-byte index for an account of a specific network that is respected as a sovereign + /// endpoint within the context. /// /// May be used when the context is a Frame-based chain and includes e.g. an indices pallet. AccountIndex64 { @@ -46,8 +46,8 @@ pub enum Junction { #[codec(compact)] index: u64, }, - /// A 20-byte identifier for an account of a specific network that is respected as a sovereign endpoint within - /// the context. + /// A 20-byte identifier for an account of a specific network that is respected as a sovereign + /// endpoint within the context. /// /// May be used when the context is an Ethereum or Bitcoin chain or smart-contract. AccountKey20 { network: NetworkId, key: [u8; 20] }, @@ -73,8 +73,8 @@ pub enum Junction { OnlyChild, /// A pluralistic body existing within consensus. /// - /// Typical to be used to represent a governance origin of a chain, but could in principle be used to represent - /// things such as multisigs also. + /// Typical to be used to represent a governance origin of a chain, but could in principle be + /// used to represent things such as multisigs also. Plurality { id: BodyId, part: BodyPart }, } diff --git a/xcm/src/v2/mod.rs b/xcm/src/v2/mod.rs index 014942d6b679..79cc8ead89a1 100644 --- a/xcm/src/v2/mod.rs +++ b/xcm/src/v2/mod.rs @@ -39,11 +39,10 @@ //! - `Order` is now obsolete and replaced entirely by `Instruction`. //! - `Xcm` is now a simple wrapper around a `Vec`. //! - During conversion from `Order` to `Instruction`, we do not handle `BuyExecution`s that have -//! nested XCMs, i.e. if the `instructions` field in the `BuyExecution` enum struct variant is -//! not empty, then the conversion will fail. To address this, rewrite the XCM using -//! `Instruction`s in chronological order. -//! - During conversion from `Xcm` to `Instruction`, we do not handle `RelayedFrom` messages at -//! all. +//! nested XCMs, i.e. if the `instructions` field in the `BuyExecution` enum struct variant is not +//! 
empty, then the conversion will fail. To address this, rewrite the XCM using `Instruction`s in +//! chronological order. +//! - During conversion from `Xcm` to `Instruction`, we do not handle `RelayedFrom` messages at all. //! //! ### XCM Pallet //! - The `Weigher` configuration item must have sensible weights defined for `BuyExecution` and @@ -153,20 +152,20 @@ pub enum BodyId { Executive, /// The unambiguous technical body (for Polkadot, this would be the Technical Committee). Technical, - /// The unambiguous legislative body (for Polkadot, this could be considered the opinion of a majority of - /// lock-voters). + /// The unambiguous legislative body (for Polkadot, this could be considered the opinion of a + /// majority of lock-voters). Legislative, - /// The unambiguous judicial body (this doesn't exist on Polkadot, but if it were to get a "grand oracle", it - /// may be considered as that). + /// The unambiguous judicial body (this doesn't exist on Polkadot, but if it were to get a + /// "grand oracle", it may be considered as that). Judicial, - /// The unambiguous defense body (for Polkadot, an opinion on the topic given via a public referendum - /// on the `staking_admin` track). + /// The unambiguous defense body (for Polkadot, an opinion on the topic given via a public + /// referendum on the `staking_admin` track). Defense, - /// The unambiguous administration body (for Polkadot, an opinion on the topic given via a public referendum - /// on the `general_admin` track). + /// The unambiguous administration body (for Polkadot, an opinion on the topic given via a + /// public referendum on the `general_admin` track). Administration, - /// The unambiguous treasury body (for Polkadot, an opinion on the topic given via a public referendum - /// on the `treasurer` track). + /// The unambiguous treasury body (for Polkadot, an opinion on the topic given via a public + /// referendum on the `treasurer` track). 
Treasury, } @@ -422,8 +421,8 @@ pub type Weight = u64; /// /// All messages are delivered from a known *origin*, expressed as a `MultiLocation`. /// -/// This is the inner XCM format and is version-sensitive. Messages are typically passed using the outer -/// XCM format, known as `VersionedXcm`. +/// This is the inner XCM format and is version-sensitive. Messages are typically passed using the +/// outer XCM format, known as `VersionedXcm`. #[derive(Derivative, Encode, Decode, TypeInfo, xcm_procedural::XcmWeightInfoTrait)] #[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))] #[codec(encode_bound())] @@ -508,8 +507,8 @@ pub enum Instruction { /// - `dest`: The location whose sovereign account will own the assets and thus the effective /// beneficiary for the assets and the notification target for the reserve asset deposit /// message. - /// - `xcm`: The instructions that should follow the `ReserveAssetDeposited` - /// instruction, which is sent onwards to `dest`. + /// - `xcm`: The instructions that should follow the `ReserveAssetDeposited` instruction, which + /// is sent onwards to `dest`. /// /// Safety: No concerns. /// @@ -538,10 +537,11 @@ pub enum Instruction { call: DoubleEncoded, }, - /// A message to notify about a new incoming HRMP channel. This message is meant to be sent by the - /// relay-chain to a para. + /// A message to notify about a new incoming HRMP channel. This message is meant to be sent by + /// the relay-chain to a para. /// - /// - `sender`: The sender in the to-be opened channel. Also, the initiator of the channel opening. + /// - `sender`: The sender in the to-be opened channel. Also, the initiator of the channel + /// opening. /// - `max_message_size`: The maximum size of a message proposed by the sender. /// - `max_capacity`: The maximum number of messages that can be queued in the channel. 
/// @@ -558,8 +558,8 @@ pub enum Instruction { }, /// A message to notify about that a previously sent open channel request has been accepted by - /// the recipient. That means that the channel will be opened during the next relay-chain session - /// change. This message is meant to be sent by the relay-chain to a para. + /// the recipient. That means that the channel will be opened during the next relay-chain + /// session change. This message is meant to be sent by the relay-chain to a para. /// /// Safety: The message should originate directly from the relay-chain. /// @@ -573,10 +573,10 @@ pub enum Instruction { recipient: u32, }, - /// A message to notify that the other party in an open channel decided to close it. In particular, - /// `initiator` is going to close the channel opened from `sender` to the `recipient`. The close - /// will be enacted at the next relay-chain session change. This message is meant to be sent by - /// the relay-chain to a para. + /// A message to notify that the other party in an open channel decided to close it. In + /// particular, `initiator` is going to close the channel opened from `sender` to the + /// `recipient`. The close will be enacted at the next relay-chain session change. This message + /// is meant to be sent by the relay-chain to a para. /// /// Safety: The message should originate directly from the relay-chain. /// @@ -639,8 +639,8 @@ pub enum Instruction { /// /// - `assets`: The asset(s) to remove from holding. /// - `max_assets`: The maximum number of unique assets/asset instances to remove from holding. - /// Only the first `max_assets` assets/instances of those matched by `assets` will be removed, - /// prioritized under standard asset ordering. Any others will remain in holding. + /// Only the first `max_assets` assets/instances of those matched by `assets` will be + /// removed, prioritized under standard asset ordering. Any others will remain in holding. /// - `beneficiary`: The new owner for the assets. 
/// /// Kind: *Instruction* @@ -661,13 +661,13 @@ pub enum Instruction { /// /// - `assets`: The asset(s) to remove from holding. /// - `max_assets`: The maximum number of unique assets/asset instances to remove from holding. - /// Only the first `max_assets` assets/instances of those matched by `assets` will be removed, - /// prioritized under standard asset ordering. Any others will remain in holding. + /// Only the first `max_assets` assets/instances of those matched by `assets` will be + /// removed, prioritized under standard asset ordering. Any others will remain in holding. /// - `dest`: The location whose sovereign account will own the assets and thus the effective /// beneficiary for the assets and the notification target for the reserve asset deposit /// message. - /// - `xcm`: The orders that should follow the `ReserveAssetDeposited` instruction - /// which is sent onwards to `dest`. + /// - `xcm`: The orders that should follow the `ReserveAssetDeposited` instruction which is + /// sent onwards to `dest`. /// /// Kind: *Instruction* /// @@ -699,9 +699,9 @@ pub enum Instruction { /// /// - `assets`: The asset(s) to remove from holding. /// - `reserve`: A valid location that acts as a reserve for all asset(s) in `assets`. The - /// sovereign account of this consensus system *on the reserve location* will have appropriate - /// assets withdrawn and `effects` will be executed on them. There will typically be only one - /// valid location on any given asset/chain combination. + /// sovereign account of this consensus system *on the reserve location* will have + /// appropriate assets withdrawn and `effects` will be executed on them. There will typically + /// be only one valid location on any given asset/chain combination. /// - `xcm`: The instructions to execute on the assets once withdrawn *on the reserve /// location*. 
/// @@ -718,8 +718,8 @@ pub enum Instruction { /// - `xcm`: The instructions to execute on the assets once arrived *on the destination /// location*. /// - /// NOTE: The `dest` location *MUST* respect this origin as a valid teleportation origin for all - /// `assets`. If it does not, then the assets may be lost. + /// NOTE: The `dest` location *MUST* respect this origin as a valid teleportation origin for + /// all `assets`. If it does not, then the assets may be lost. /// /// Kind: *Instruction* /// diff --git a/xcm/src/v2/multiasset.rs b/xcm/src/v2/multiasset.rs index aae65dcbb54a..fdd7797a1230 100644 --- a/xcm/src/v2/multiasset.rs +++ b/xcm/src/v2/multiasset.rs @@ -17,11 +17,14 @@ //! Cross-Consensus Message format asset data structures. //! //! This encompasses four types for representing assets: -//! - `MultiAsset`: A description of a single asset, either an instance of a non-fungible or some amount of a fungible. -//! - `MultiAssets`: A collection of `MultiAsset`s. These are stored in a `Vec` and sorted with fungibles first. -//! - `Wild`: A single asset wildcard, this can either be "all" assets, or all assets of a specific kind. -//! - `MultiAssetFilter`: A combination of `Wild` and `MultiAssets` designed for efficiently filtering an XCM holding -//! account. +//! - `MultiAsset`: A description of a single asset, either an instance of a non-fungible or some +//! amount of a fungible. +//! - `MultiAssets`: A collection of `MultiAsset`s. These are stored in a `Vec` and sorted with +//! fungibles first. +//! - `Wild`: A single asset wildcard, this can either be "all" assets, or all assets of a specific +//! kind. +//! - `MultiAssetFilter`: A combination of `Wild` and `MultiAssets` designed for efficiently +//! filtering an XCM holding account. use super::MultiLocation; use crate::v3::{ @@ -42,8 +45,8 @@ pub enum AssetInstance { /// Undefined - used if the non-fungible asset class has only one instance. Undefined, - /// A compact index. 
Technically this could be greater than `u128`, but this implementation supports only - /// values up to `2**128 - 1`. + /// A compact index. Technically this could be greater than `u128`, but this implementation + /// supports only values up to `2**128 - 1`. Index(#[codec(compact)] u128), /// A 4-byte fixed-length datum. @@ -165,19 +168,21 @@ impl AssetId { Ok(()) } - /// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding `MultiAsset` value. + /// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding + /// `MultiAsset` value. pub fn into_multiasset(self, fun: Fungibility) -> MultiAsset { MultiAsset { fun, id: self } } - /// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding `WildMultiAsset` - /// wildcard (`AllOf`) value. + /// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding + /// `WildMultiAsset` wildcard (`AllOf`) value. pub fn into_wild(self, fun: WildFungibility) -> WildMultiAsset { WildMultiAsset::AllOf { fun, id: self } } } -/// Classification of whether an asset is fungible or not, along with a mandatory amount or instance. +/// Classification of whether an asset is fungible or not, along with a mandatory amount or +/// instance. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, Decode, TypeInfo)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub enum Fungibility { @@ -300,7 +305,8 @@ impl TryFrom for MultiAsset { } } -/// A `Vec` of `MultiAsset`s. There may be no duplicate fungible items in here and when decoding, they must be sorted. +/// A `Vec` of `MultiAsset`s. There may be no duplicate fungible items in here and when decoding, +/// they must be sorted. 
#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, TypeInfo)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct MultiAssets(Vec); @@ -370,11 +376,12 @@ impl MultiAssets { Self(Vec::new()) } - /// Create a new instance of `MultiAssets` from a `Vec` whose contents are sorted and - /// which contain no duplicates. + /// Create a new instance of `MultiAssets` from a `Vec` whose contents are sorted + /// and which contain no duplicates. /// - /// Returns `Ok` if the operation succeeds and `Err` if `r` is out of order or had duplicates. If you can't - /// guarantee that `r` is sorted and deduplicated, then use `From::>::from` which is infallible. + /// Returns `Ok` if the operation succeeds and `Err` if `r` is out of order or had duplicates. + /// If you can't guarantee that `r` is sorted and deduplicated, then use + /// `From::>::from` which is infallible. pub fn from_sorted_and_deduplicated(r: Vec) -> Result { if r.is_empty() { return Ok(Self(Vec::new())) @@ -389,20 +396,22 @@ impl MultiAssets { Ok(Self(r)) } - /// Create a new instance of `MultiAssets` from a `Vec` whose contents are sorted and - /// which contain no duplicates. + /// Create a new instance of `MultiAssets` from a `Vec` whose contents are sorted + /// and which contain no duplicates. /// - /// In release mode, this skips any checks to ensure that `r` is correct, making it a negligible-cost operation. - /// Generally though you should avoid using it unless you have a strict proof that `r` is valid. + /// In release mode, this skips any checks to ensure that `r` is correct, making it a + /// negligible-cost operation. Generally though you should avoid using it unless you have a + /// strict proof that `r` is valid. 
#[cfg(test)] pub fn from_sorted_and_deduplicated_skip_checks(r: Vec) -> Self { Self::from_sorted_and_deduplicated(r).expect("Invalid input r is not sorted/deduped") } - /// Create a new instance of `MultiAssets` from a `Vec` whose contents are sorted and - /// which contain no duplicates. + /// Create a new instance of `MultiAssets` from a `Vec` whose contents are sorted + /// and which contain no duplicates. /// - /// In release mode, this skips any checks to ensure that `r` is correct, making it a negligible-cost operation. - /// Generally though you should avoid using it unless you have a strict proof that `r` is valid. + /// In release mode, this skips any checks to ensure that `r` is correct, making it a + /// negligible-cost operation. Generally though you should avoid using it unless you have a + /// strict proof that `r` is valid. /// /// In test mode, this checks anyway and panics on fail. #[cfg(not(test))] @@ -410,7 +419,8 @@ impl MultiAssets { Self(r) } - /// Add some asset onto the list, saturating. This is quite a laborious operation since it maintains the ordering. + /// Add some asset onto the list, saturating. This is quite a laborious operation since it + /// maintains the ordering. pub fn push(&mut self, a: MultiAsset) { if let Fungibility::Fungible(ref amount) = a.fun { for asset in self.0.iter_mut().filter(|x| x.id == a.id) { @@ -489,19 +499,19 @@ impl TryFrom for WildFungibility { #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, Decode, TypeInfo)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub enum WildMultiAsset { - /// All assets in the holding register, up to `usize` individual assets (different instances of non-fungibles could - /// be separate assets). + /// All assets in the holding register, up to `usize` individual assets (different instances of + /// non-fungibles could be separate assets). All, - /// All assets in the holding register of a given fungibility and ID. 
If operating on non-fungibles, then a limit - /// is provided for the maximum amount of matching instances. + /// All assets in the holding register of a given fungibility and ID. If operating on + /// non-fungibles, then a limit is provided for the maximum amount of matching instances. AllOf { id: AssetId, fun: WildFungibility }, } impl WildMultiAsset { /// Returns true if `self` is a super-set of the given `inner`. /// - /// Typically, any wildcard is never contained in anything else, and a wildcard can contain any other non-wildcard. - /// For more details, see the implementation and tests. + /// Typically, any wildcard is never contained in anything else, and a wildcard can contain any + /// other non-wildcard. For more details, see the implementation and tests. pub fn contains(&self, inner: &MultiAsset) -> bool { use WildMultiAsset::*; match self { @@ -565,8 +575,8 @@ impl From for MultiAssetFilter { impl MultiAssetFilter { /// Returns true if `self` is a super-set of the given `inner`. /// - /// Typically, any wildcard is never contained in anything else, and a wildcard can contain any other non-wildcard. - /// For more details, see the implementation and tests. + /// Typically, any wildcard is never contained in anything else, and a wildcard can contain any + /// other non-wildcard. For more details, see the implementation and tests. pub fn contains(&self, inner: &MultiAsset) -> bool { match self { MultiAssetFilter::Definite(ref assets) => assets.contains(inner), diff --git a/xcm/src/v2/multilocation.rs b/xcm/src/v2/multilocation.rs index 086a83277322..9fb74e8afb35 100644 --- a/xcm/src/v2/multilocation.rs +++ b/xcm/src/v2/multilocation.rs @@ -174,8 +174,8 @@ impl MultiLocation { self.interior.push_front(new) } - /// Consumes `self` and returns a `MultiLocation` suffixed with `new`, or an `Err` with theoriginal value of - /// `self` in case of overflow. 
+ /// Consumes `self` and returns a `MultiLocation` suffixed with `new`, or an `Err` with
+ /// the original value of `self` in case of overflow.
 pub fn pushed_with_interior(self, new: Junction) -> result::Result {
 match self.interior.pushed_with(new) {
 Ok(i) => Ok(MultiLocation { interior: i, parents: self.parents }),
@@ -183,8 +183,8 @@ impl MultiLocation {
 }
 }
- /// Consumes `self` and returns a `MultiLocation` prefixed with `new`, or an `Err` with the original value of
- /// `self` in case of overflow.
+ /// Consumes `self` and returns a `MultiLocation` prefixed with `new`, or an `Err` with the
+ /// original value of `self` in case of overflow.
 pub fn pushed_front_with_interior(
 self,
 new: Junction,
 ) -> result::Result {
 match self.interior.pushed_front_with(new) {
 Ok(i) => Ok(MultiLocation { interior: i, parents: self.parents }),
@@ -430,7 +430,8 @@ impl From for MultiLocation {
 }
 }
-/// A tuple struct which can be converted into a `MultiLocation` of `parents` value 1 with the inner interior.
+/// A tuple struct which can be converted into a `MultiLocation` of `parents` value 1 with the inner
+/// interior.
 #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
 pub struct ParentThen(pub Junctions);
 impl From for MultiLocation {
@@ -448,7 +449,8 @@ impl From for MultiLocation {
 }
 }
-/// A unit struct which can be converted into a `MultiLocation` of the inner `parents` value and the inner interior.
+/// A unit struct which can be converted into a `MultiLocation` of the inner `parents` value and the
+/// inner interior.
 #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
 pub struct AncestorThen(pub u8, pub Interior);
 impl> From> for MultiLocation {
@@ -598,8 +600,8 @@ impl Junctions {
 }
 }
- /// Splits off the first junction, returning the remaining suffix (first item in tuple) and the first element
- /// (second item in tuple) or `None` if it was empty.
+ /// Splits off the first junction, returning the remaining suffix (first item in tuple) and the
+ /// first element (second item in tuple) or `None` if it was empty.
pub fn split_first(self) -> (Junctions, Option) { match self { Junctions::Here => (Junctions::Here, None), @@ -614,8 +616,8 @@ impl Junctions { } } - /// Splits off the last junction, returning the remaining prefix (first item in tuple) and the last element - /// (second item in tuple) or `None` if it was empty. + /// Splits off the last junction, returning the remaining prefix (first item in tuple) and the + /// last element (second item in tuple) or `None` if it was empty. pub fn split_last(self) -> (Junctions, Option) { match self { Junctions::Here => (Junctions::Here, None), @@ -727,7 +729,8 @@ impl Junctions { } } - /// Returns the junction at index `i`, or `None` if the location doesn't contain that many elements. + /// Returns the junction at index `i`, or `None` if the location doesn't contain that many + /// elements. pub fn at(&self, i: usize) -> Option<&Junction> { Some(match (i, self) { (0, Junctions::X1(ref a)) => a, @@ -770,8 +773,8 @@ impl Junctions { }) } - /// Returns a mutable reference to the junction at index `i`, or `None` if the location doesn't contain that many - /// elements. + /// Returns a mutable reference to the junction at index `i`, or `None` if the location doesn't + /// contain that many elements. pub fn at_mut(&mut self, i: usize) -> Option<&mut Junction> { Some(match (i, self) { (0, Junctions::X1(ref mut a)) => a, diff --git a/xcm/src/v2/traits.rs b/xcm/src/v2/traits.rs index 524b659d57e1..ae03cf5547ba 100644 --- a/xcm/src/v2/traits.rs +++ b/xcm/src/v2/traits.rs @@ -81,7 +81,8 @@ pub enum Error { /// Used by `Transact` when the functor cannot be decoded. #[codec(index = 17)] FailedToDecode, - /// Used by `Transact` to indicate that the given weight limit could be breached by the functor. + /// Used by `Transact` to indicate that the given weight limit could be breached by the + /// functor. #[codec(index = 18)] MaxWeightInvalid, /// Used by `BuyExecution` when the Holding Register does not contain payable fees. 
@@ -94,7 +95,8 @@ pub enum Error { #[codec(index = 21)] Trap(u64), - // Errors that happen prior to instructions being executed. These fall outside of the XCM spec. + // Errors that happen prior to instructions being executed. These fall outside of the XCM + // spec. /// XCM version not able to be handled. UnhandledXcmVersion, /// Execution of the XCM would potentially result in a greater weight used than weight limit. @@ -161,7 +163,8 @@ pub type Result = result::Result<(), Error>; pub enum Outcome { /// Execution completed successfully; given weight was used. Complete(Weight), - /// Execution started, but did not complete successfully due to the given error; given weight was used. + /// Execution started, but did not complete successfully due to the given error; given weight + /// was used. Incomplete(Weight, Error), /// Execution did not start due to the given error. Error(Error), @@ -194,9 +197,9 @@ impl Outcome { /// Type of XCM message executor. pub trait ExecuteXcm { - /// Execute some XCM `message` from `origin` using no more than `weight_limit` weight. The weight limit is - /// a basic hard-limit and the implementation may place further restrictions or requirements on weight and - /// other aspects. + /// Execute some XCM `message` from `origin` using no more than `weight_limit` weight. The + /// weight limit is a basic hard-limit and the implementation may place further restrictions or + /// requirements on weight and other aspects. fn execute_xcm( origin: impl Into, message: Xcm, @@ -215,8 +218,8 @@ pub trait ExecuteXcm { /// Execute some XCM `message` from `origin` using no more than `weight_limit` weight. /// - /// Some amount of `weight_credit` may be provided which, depending on the implementation, may allow - /// execution without associated payment. + /// Some amount of `weight_credit` may be provided which, depending on the implementation, may + /// allow execution without associated payment. 
fn execute_xcm_in_credit( origin: impl Into, message: Xcm, @@ -263,9 +266,9 @@ pub type SendResult = result::Result<(), SendError>; /// Utility for sending an XCM message. /// -/// These can be amalgamated in tuples to form sophisticated routing systems. In tuple format, each router might return -/// `NotApplicable` to pass the execution to the next sender item. Note that each `NotApplicable` -/// might alter the destination and the XCM message for to the next router. +/// These can be amalgamated in tuples to form sophisticated routing systems. In tuple format, each +/// router might return `NotApplicable` to pass the execution to the next sender item. Note that +/// each `NotApplicable` might alter the destination and the XCM message for to the next router. /// /// /// # Example @@ -330,9 +333,9 @@ pub type SendResult = result::Result<(), SendError>; pub trait SendXcm { /// Send an XCM `message` to a given `destination`. /// - /// If it is not a destination which can be reached with this type but possibly could by others, then it *MUST* - /// return `NotApplicable`. Any other error will cause the tuple implementation to exit early without - /// trying other type fields. + /// If it is not a destination which can be reached with this type but possibly could by others, + /// then it *MUST* return `NotApplicable`. Any other error will cause the tuple implementation + /// to exit early without trying other type fields. fn send_xcm(destination: impl Into, message: Xcm<()>) -> SendResult; } diff --git a/xcm/src/v3/junction.rs b/xcm/src/v3/junction.rs index 5fee8d1f83bd..ae66e2b33364 100644 --- a/xcm/src/v3/junction.rs +++ b/xcm/src/v3/junction.rs @@ -127,20 +127,20 @@ pub enum BodyId { Executive, /// The unambiguous technical body (for Polkadot, this would be the Technical Committee). Technical, - /// The unambiguous legislative body (for Polkadot, this could be considered the opinion of a majority of - /// lock-voters). 
+ /// The unambiguous legislative body (for Polkadot, this could be considered the opinion of a + /// majority of lock-voters). Legislative, - /// The unambiguous judicial body (this doesn't exist on Polkadot, but if it were to get a "grand oracle", it - /// may be considered as that). + /// The unambiguous judicial body (this doesn't exist on Polkadot, but if it were to get a + /// "grand oracle", it may be considered as that). Judicial, - /// The unambiguous defense body (for Polkadot, an opinion on the topic given via a public referendum - /// on the `staking_admin` track). + /// The unambiguous defense body (for Polkadot, an opinion on the topic given via a public + /// referendum on the `staking_admin` track). Defense, - /// The unambiguous administration body (for Polkadot, an opinion on the topic given via a public referendum - /// on the `general_admin` track). + /// The unambiguous administration body (for Polkadot, an opinion on the topic given via a + /// public referendum on the `general_admin` track). Administration, - /// The unambiguous treasury body (for Polkadot, an opinion on the topic given via a public referendum - /// on the `treasurer` track). + /// The unambiguous treasury body (for Polkadot, an opinion on the topic given via a public + /// referendum on the `treasurer` track). Treasury, } @@ -266,13 +266,13 @@ pub enum Junction { /// /// Generally used when the context is a Polkadot Relay-chain. Parachain(#[codec(compact)] u32), - /// A 32-byte identifier for an account of a specific network that is respected as a sovereign endpoint within - /// the context. + /// A 32-byte identifier for an account of a specific network that is respected as a sovereign + /// endpoint within the context. /// /// Generally used when the context is a Substrate-based chain. AccountId32 { network: Option, id: [u8; 32] }, - /// An 8-byte index for an account of a specific network that is respected as a sovereign endpoint within - /// the context. 
+ /// An 8-byte index for an account of a specific network that is respected as a sovereign + /// endpoint within the context. /// /// May be used when the context is a Frame-based chain and includes e.g. an indices pallet. AccountIndex64 { @@ -280,8 +280,8 @@ pub enum Junction { #[codec(compact)] index: u64, }, - /// A 20-byte identifier for an account of a specific network that is respected as a sovereign endpoint within - /// the context. + /// A 20-byte identifier for an account of a specific network that is respected as a sovereign + /// endpoint within the context. /// /// May be used when the context is an Ethereum or Bitcoin chain or smart-contract. AccountKey20 { network: Option, key: [u8; 20] }, @@ -310,8 +310,8 @@ pub enum Junction { OnlyChild, /// A pluralistic body existing within consensus. /// - /// Typical to be used to represent a governance origin of a chain, but could in principle be used to represent - /// things such as multisigs also. + /// Typical to be used to represent a governance origin of a chain, but could in principle be + /// used to represent things such as multisigs also. Plurality { id: BodyId, part: BodyPart }, /// A global network capable of externalizing its own consensus. This is not generally /// meaningful outside of the universal level. @@ -413,7 +413,8 @@ impl Junction { /// Convert `self` into a `MultiLocation` containing `n` parents. /// - /// Similar to `Self::into_location`, with the added ability to specify the number of parent junctions. + /// Similar to `Self::into_location`, with the added ability to specify the number of parent + /// junctions. 
pub const fn into_exterior(self, n: u8) -> MultiLocation { MultiLocation { parents: n, interior: Junctions::X1(self) } } diff --git a/xcm/src/v3/junctions.rs b/xcm/src/v3/junctions.rs index da06cdbdad67..201a80fb7658 100644 --- a/xcm/src/v3/junctions.rs +++ b/xcm/src/v3/junctions.rs @@ -137,7 +137,8 @@ impl Junctions { /// Convert `self` into a `MultiLocation` containing `n` parents. /// - /// Similar to `Self::into_location`, with the added ability to specify the number of parent junctions. + /// Similar to `Self::into_location`, with the added ability to specify the number of parent + /// junctions. pub const fn into_exterior(self, n: u8) -> MultiLocation { MultiLocation { parents: n, interior: self } } @@ -309,8 +310,8 @@ impl Junctions { } } - /// Splits off the first junction, returning the remaining suffix (first item in tuple) and the first element - /// (second item in tuple) or `None` if it was empty. + /// Splits off the first junction, returning the remaining suffix (first item in tuple) and the + /// first element (second item in tuple) or `None` if it was empty. pub fn split_first(self) -> (Junctions, Option) { match self { Junctions::Here => (Junctions::Here, None), @@ -325,8 +326,8 @@ impl Junctions { } } - /// Splits off the last junction, returning the remaining prefix (first item in tuple) and the last element - /// (second item in tuple) or `None` if it was empty. + /// Splits off the last junction, returning the remaining prefix (first item in tuple) and the + /// last element (second item in tuple) or `None` if it was empty. pub fn split_last(self) -> (Junctions, Option) { match self { Junctions::Here => (Junctions::Here, None), @@ -469,7 +470,8 @@ impl Junctions { } } - /// Returns the junction at index `i`, or `None` if the location doesn't contain that many elements. + /// Returns the junction at index `i`, or `None` if the location doesn't contain that many + /// elements. 
pub fn at(&self, i: usize) -> Option<&Junction> { Some(match (i, self) { (0, Junctions::X1(ref a)) => a, @@ -512,8 +514,8 @@ impl Junctions { }) } - /// Returns a mutable reference to the junction at index `i`, or `None` if the location doesn't contain that many - /// elements. + /// Returns a mutable reference to the junction at index `i`, or `None` if the location doesn't + /// contain that many elements. pub fn at_mut(&mut self, i: usize) -> Option<&mut Junction> { Some(match (i, self) { (0, Junctions::X1(ref mut a)) => a, diff --git a/xcm/src/v3/mod.rs b/xcm/src/v3/mod.rs index 772ad48ac4b2..3614dc22550d 100644 --- a/xcm/src/v3/mod.rs +++ b/xcm/src/v3/mod.rs @@ -367,8 +367,8 @@ impl XcmContext { /// /// All messages are delivered from a known *origin*, expressed as a `MultiLocation`. /// -/// This is the inner XCM format and is version-sensitive. Messages are typically passed using the outer -/// XCM format, known as `VersionedXcm`. +/// This is the inner XCM format and is version-sensitive. Messages are typically passed using the +/// outer XCM format, known as `VersionedXcm`. #[derive(Derivative, Encode, Decode, TypeInfo, xcm_procedural::XcmWeightInfoTrait)] #[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))] #[codec(encode_bound())] @@ -417,9 +417,8 @@ pub enum Instruction { /// - `response`: The message content. /// - `max_weight`: The maximum weight that handling this response should take. /// - `querier`: The location responsible for the initiation of the response, if there is one. - /// In general this will tend to be the same location as the receiver of this message. - /// NOTE: As usual, this is interpreted from the perspective of the receiving consensus - /// system. + /// In general this will tend to be the same location as the receiver of this message. NOTE: + /// As usual, this is interpreted from the perspective of the receiving consensus system. 
/// /// Safety: Since this is information only, there are no immediate concerns. However, it should /// be remembered that even if the Origin behaves reasonably, it can always be asked to make @@ -460,8 +459,8 @@ pub enum Instruction { /// - `dest`: The location whose sovereign account will own the assets and thus the effective /// beneficiary for the assets and the notification target for the reserve asset deposit /// message. - /// - `xcm`: The instructions that should follow the `ReserveAssetDeposited` - /// instruction, which is sent onwards to `dest`. + /// - `xcm`: The instructions that should follow the `ReserveAssetDeposited` instruction, which + /// is sent onwards to `dest`. /// /// Safety: No concerns. /// @@ -487,10 +486,11 @@ pub enum Instruction { /// Errors: Transact { origin_kind: OriginKind, require_weight_at_most: Weight, call: DoubleEncoded }, - /// A message to notify about a new incoming HRMP channel. This message is meant to be sent by the - /// relay-chain to a para. + /// A message to notify about a new incoming HRMP channel. This message is meant to be sent by + /// the relay-chain to a para. /// - /// - `sender`: The sender in the to-be opened channel. Also, the initiator of the channel opening. + /// - `sender`: The sender in the to-be opened channel. Also, the initiator of the channel + /// opening. /// - `max_message_size`: The maximum size of a message proposed by the sender. /// - `max_capacity`: The maximum number of messages that can be queued in the channel. /// @@ -507,8 +507,8 @@ pub enum Instruction { }, /// A message to notify about that a previously sent open channel request has been accepted by - /// the recipient. That means that the channel will be opened during the next relay-chain session - /// change. This message is meant to be sent by the relay-chain to a para. + /// the recipient. That means that the channel will be opened during the next relay-chain + /// session change. 
This message is meant to be sent by the relay-chain to a para. /// /// Safety: The message should originate directly from the relay-chain. /// @@ -522,10 +522,10 @@ pub enum Instruction { recipient: u32, }, - /// A message to notify that the other party in an open channel decided to close it. In particular, - /// `initiator` is going to close the channel opened from `sender` to the `recipient`. The close - /// will be enacted at the next relay-chain session change. This message is meant to be sent by - /// the relay-chain to a para. + /// A message to notify that the other party in an open channel decided to close it. In + /// particular, `initiator` is going to close the channel opened from `sender` to the + /// `recipient`. The close will be enacted at the next relay-chain session change. This message + /// is meant to be sent by the relay-chain to a para. /// /// Safety: The message should originate directly from the relay-chain. /// @@ -593,8 +593,8 @@ pub enum Instruction { /// - `dest`: The location whose sovereign account will own the assets and thus the effective /// beneficiary for the assets and the notification target for the reserve asset deposit /// message. - /// - `xcm`: The orders that should follow the `ReserveAssetDeposited` instruction - /// which is sent onwards to `dest`. + /// - `xcm`: The orders that should follow the `ReserveAssetDeposited` instruction which is + /// sent onwards to `dest`. /// /// Kind: *Instruction* /// @@ -623,9 +623,9 @@ pub enum Instruction { /// /// - `assets`: The asset(s) to remove from holding. /// - `reserve`: A valid location that acts as a reserve for all asset(s) in `assets`. The - /// sovereign account of this consensus system *on the reserve location* will have appropriate - /// assets withdrawn and `effects` will be executed on them. There will typically be only one - /// valid location on any given asset/chain combination. 
+ /// sovereign account of this consensus system *on the reserve location* will have + /// appropriate assets withdrawn and `effects` will be executed on them. There will typically + /// be only one valid location on any given asset/chain combination. /// - `xcm`: The instructions to execute on the assets once withdrawn *on the reserve /// location*. /// @@ -642,8 +642,8 @@ pub enum Instruction { /// - `xcm`: The instructions to execute on the assets once arrived *on the destination /// location*. /// - /// NOTE: The `dest` location *MUST* respect this origin as a valid teleportation origin for all - /// `assets`. If it does not, then the assets may be lost. + /// NOTE: The `dest` location *MUST* respect this origin as a valid teleportation origin for + /// all `assets`. If it does not, then the assets may be lost. /// /// Kind: *Instruction* /// @@ -809,7 +809,8 @@ pub enum Instruction { /// Kind: *Instruction* /// /// Errors: - /// - `ExpectationFalse`: If the value of the Transact Status Register is not equal to the parameter. + /// - `ExpectationFalse`: If the value of the Transact Status Register is not equal to the + /// parameter. ExpectTransactStatus(MaybeErrorCode), /// Query the existence of a particular pallet type. @@ -830,11 +831,15 @@ pub enum Instruction { /// Ensure that a particular pallet with a particular version exists. /// - /// - `index: Compact`: The index which identifies the pallet. An error if no pallet exists at this index. + /// - `index: Compact`: The index which identifies the pallet. An error if no pallet exists at + /// this index. /// - `name: Vec`: Name which must be equal to the name of the pallet. - /// - `module_name: Vec`: Module name which must be equal to the name of the module in which the pallet exists. - /// - `crate_major: Compact`: Version number which must be equal to the major version of the crate which implements the pallet. 
- /// - `min_crate_minor: Compact`: Version number which must be at most the minor version of the crate which implements the pallet. + /// - `module_name: Vec`: Module name which must be equal to the name of the module in + /// which the pallet exists. + /// - `crate_major: Compact`: Version number which must be equal to the major version of the + /// crate which implements the pallet. + /// - `min_crate_minor: Compact`: Version number which must be at most the minor version of the + /// crate which implements the pallet. /// /// Safety: No concerns. /// @@ -961,8 +966,8 @@ pub enum Instruction { /// of course, if there is no record that the asset actually is locked. /// /// - `asset`: The asset(s) to be unlocked. - /// - `locker`: The location from which a previous `NoteUnlockable` was sent and to which - /// an `UnlockAsset` should be sent. + /// - `locker`: The location from which a previous `NoteUnlockable` was sent and to which an + /// `UnlockAsset` should be sent. /// /// Kind: *Instruction*. /// @@ -971,8 +976,8 @@ pub enum Instruction { /// Sets the Fees Mode Register. /// - /// - `jit_withdraw`: The fees mode item; if set to `true` then fees for any instructions - /// are withdrawn as needed using the same mechanism as `WithdrawAssets`. + /// - `jit_withdraw`: The fees mode item; if set to `true` then fees for any instructions are + /// withdrawn as needed using the same mechanism as `WithdrawAssets`. /// /// Kind: *Instruction*. /// diff --git a/xcm/src/v3/multiasset.rs b/xcm/src/v3/multiasset.rs index a4900a71539a..1668d1b870dc 100644 --- a/xcm/src/v3/multiasset.rs +++ b/xcm/src/v3/multiasset.rs @@ -17,11 +17,14 @@ //! Cross-Consensus Message format asset data structures. //! //! This encompasses four types for representing assets: -//! - `MultiAsset`: A description of a single asset, either an instance of a non-fungible or some amount of a fungible. -//! - `MultiAssets`: A collection of `MultiAsset`s. 
These are stored in a `Vec` and sorted with fungibles first. -//! - `Wild`: A single asset wildcard, this can either be "all" assets, or all assets of a specific kind. -//! - `MultiAssetFilter`: A combination of `Wild` and `MultiAssets` designed for efficiently filtering an XCM holding -//! account. +//! - `MultiAsset`: A description of a single asset, either an instance of a non-fungible or some +//! amount of a fungible. +//! - `MultiAssets`: A collection of `MultiAsset`s. These are stored in a `Vec` and sorted with +//! fungibles first. +//! - `Wild`: A single asset wildcard, this can either be "all" assets, or all assets of a specific +//! kind. +//! - `MultiAssetFilter`: A combination of `Wild` and `MultiAssets` designed for efficiently +//! filtering an XCM holding account. use super::{InteriorMultiLocation, MultiLocation}; use crate::v2::{ @@ -47,8 +50,8 @@ pub enum AssetInstance { /// Undefined - used if the non-fungible asset class has only one instance. Undefined, - /// A compact index. Technically this could be greater than `u128`, but this implementation supports only - /// values up to `2**128 - 1`. + /// A compact index. Technically this could be greater than `u128`, but this implementation + /// supports only values up to `2**128 - 1`. Index(#[codec(compact)] u128), /// A 4-byte fixed-length datum. @@ -234,7 +237,8 @@ impl TryFrom for u128 { } } -/// Classification of whether an asset is fungible or not, along with a mandatory amount or instance. +/// Classification of whether an asset is fungible or not, along with a mandatory amount or +/// instance. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, TypeInfo, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub enum Fungibility { @@ -387,13 +391,14 @@ impl AssetId { Ok(()) } - /// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding `MultiAsset` value. 
+ /// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding + /// `MultiAsset` value. pub fn into_multiasset(self, fun: Fungibility) -> MultiAsset { MultiAsset { fun, id: self } } - /// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding `WildMultiAsset` - /// wildcard (`AllOf`) value. + /// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding + /// `WildMultiAsset` wildcard (`AllOf`) value. pub fn into_wild(self, fun: WildFungibility) -> WildMultiAsset { WildMultiAsset::AllOf { fun, id: self } } @@ -576,11 +581,12 @@ impl MultiAssets { Self(Vec::new()) } - /// Create a new instance of `MultiAssets` from a `Vec` whose contents are sorted and - /// which contain no duplicates. + /// Create a new instance of `MultiAssets` from a `Vec` whose contents are sorted + /// and which contain no duplicates. /// - /// Returns `Ok` if the operation succeeds and `Err` if `r` is out of order or had duplicates. If you can't - /// guarantee that `r` is sorted and deduplicated, then use `From::>::from` which is infallible. + /// Returns `Ok` if the operation succeeds and `Err` if `r` is out of order or had duplicates. + /// If you can't guarantee that `r` is sorted and deduplicated, then use + /// `From::>::from` which is infallible. pub fn from_sorted_and_deduplicated(r: Vec) -> Result { if r.is_empty() { return Ok(Self(Vec::new())) @@ -595,20 +601,22 @@ impl MultiAssets { Ok(Self(r)) } - /// Create a new instance of `MultiAssets` from a `Vec` whose contents are sorted and - /// which contain no duplicates. + /// Create a new instance of `MultiAssets` from a `Vec` whose contents are sorted + /// and which contain no duplicates. /// - /// In release mode, this skips any checks to ensure that `r` is correct, making it a negligible-cost operation. - /// Generally though you should avoid using it unless you have a strict proof that `r` is valid. 
+ /// In release mode, this skips any checks to ensure that `r` is correct, making it a + /// negligible-cost operation. Generally though you should avoid using it unless you have a + /// strict proof that `r` is valid. #[cfg(test)] pub fn from_sorted_and_deduplicated_skip_checks(r: Vec) -> Self { Self::from_sorted_and_deduplicated(r).expect("Invalid input r is not sorted/deduped") } - /// Create a new instance of `MultiAssets` from a `Vec` whose contents are sorted and - /// which contain no duplicates. + /// Create a new instance of `MultiAssets` from a `Vec` whose contents are sorted + /// and which contain no duplicates. /// - /// In release mode, this skips any checks to ensure that `r` is correct, making it a negligible-cost operation. - /// Generally though you should avoid using it unless you have a strict proof that `r` is valid. + /// In release mode, this skips any checks to ensure that `r` is correct, making it a + /// negligible-cost operation. Generally though you should avoid using it unless you have a + /// strict proof that `r` is valid. /// /// In test mode, this checks anyway and panics on fail. #[cfg(not(test))] @@ -616,7 +624,8 @@ impl MultiAssets { Self(r) } - /// Add some asset onto the list, saturating. This is quite a laborious operation since it maintains the ordering. + /// Add some asset onto the list, saturating. This is quite a laborious operation since it + /// maintains the ordering. pub fn push(&mut self, a: MultiAsset) { for asset in self.0.iter_mut().filter(|x| x.id == a.id) { match (&a.fun, &mut asset.fun) { diff --git a/xcm/src/v3/multilocation.rs b/xcm/src/v3/multilocation.rs index 09d547503f1c..07f829d014c0 100644 --- a/xcm/src/v3/multilocation.rs +++ b/xcm/src/v3/multilocation.rs @@ -198,8 +198,8 @@ impl MultiLocation { self.interior.push_front(new) } - /// Consumes `self` and returns a `MultiLocation` suffixed with `new`, or an `Err` with theoriginal value of - /// `self` in case of overflow. 
+ /// Consumes `self` and returns a `MultiLocation` suffixed with `new`, or an `Err` with + /// theoriginal value of `self` in case of overflow. pub fn pushed_with_interior( self, new: impl Into, @@ -210,8 +210,8 @@ impl MultiLocation { } } - /// Consumes `self` and returns a `MultiLocation` prefixed with `new`, or an `Err` with the original value of - /// `self` in case of overflow. + /// Consumes `self` and returns a `MultiLocation` prefixed with `new`, or an `Err` with the + /// original value of `self` in case of overflow. pub fn pushed_front_with_interior( self, new: impl Into, @@ -472,7 +472,8 @@ impl From for MultiLocation { } } -/// A tuple struct which can be converted into a `MultiLocation` of `parents` value 1 with the inner interior. +/// A tuple struct which can be converted into a `MultiLocation` of `parents` value 1 with the inner +/// interior. #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)] pub struct ParentThen(pub Junctions); impl From for MultiLocation { @@ -490,7 +491,8 @@ impl From for MultiLocation { } } -/// A unit struct which can be converted into a `MultiLocation` of the inner `parents` value and the inner interior. +/// A unit struct which can be converted into a `MultiLocation` of the inner `parents` value and the +/// inner interior. #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)] pub struct AncestorThen(pub u8, pub Interior); impl> From> for MultiLocation { diff --git a/xcm/src/v3/traits.rs b/xcm/src/v3/traits.rs index 966fb724ed11..128be42c2a2b 100644 --- a/xcm/src/v3/traits.rs +++ b/xcm/src/v3/traits.rs @@ -86,7 +86,8 @@ pub enum Error { /// Used by `Transact` when the functor cannot be decoded. #[codec(index = 17)] FailedToDecode, - /// Used by `Transact` to indicate that the given weight limit could be breached by the functor. + /// Used by `Transact` to indicate that the given weight limit could be breached by the + /// functor. 
#[codec(index = 18)] MaxWeightInvalid, /// Used by `BuyExecution` when the Holding Register does not contain payable fees. @@ -138,7 +139,8 @@ pub enum Error { #[codec(index = 34)] NotDepositable, - // Errors that happen prior to instructions being executed. These fall outside of the XCM spec. + // Errors that happen prior to instructions being executed. These fall outside of the XCM + // spec. /// XCM version not able to be handled. UnhandledXcmVersion, /// Execution of the XCM would potentially result in a greater weight used than weight limit. @@ -263,7 +265,8 @@ impl From for Outcome { pub enum Outcome { /// Execution completed successfully; given weight was used. Complete(Weight), - /// Execution started, but did not complete successfully due to the given error; given weight was used. + /// Execution started, but did not complete successfully due to the given error; given weight + /// was used. Incomplete(Weight, Error), /// Execution did not start due to the given error. Error(Error), diff --git a/xcm/xcm-builder/src/asset_conversion.rs b/xcm/xcm-builder/src/asset_conversion.rs index 583231d792dd..2fe26e8cd1e3 100644 --- a/xcm/xcm-builder/src/asset_conversion.rs +++ b/xcm/xcm-builder/src/asset_conversion.rs @@ -22,9 +22,9 @@ use sp_std::{marker::PhantomData, prelude::*, result}; use xcm::latest::prelude::*; use xcm_executor::traits::{Error as MatchError, MatchesFungibles, MatchesNonFungibles}; -/// Converter struct implementing `AssetIdConversion` converting a numeric asset ID (must be `TryFrom/TryInto`) into -/// a `GeneralIndex` junction, prefixed by some `MultiLocation` value. The `MultiLocation` value will typically be a -/// `PalletInstance` junction. +/// Converter struct implementing `AssetIdConversion` converting a numeric asset ID (must be +/// `TryFrom/TryInto`) into a `GeneralIndex` junction, prefixed by some `MultiLocation` value. +/// The `MultiLocation` value will typically be a `PalletInstance` junction. 
pub struct AsPrefixedGeneralIndex( PhantomData<(Prefix, AssetId, ConvertAssetId)>, ); diff --git a/xcm/xcm-builder/src/currency_adapter.rs b/xcm/xcm-builder/src/currency_adapter.rs index 32db840858a9..4dbd4fe8bcd0 100644 --- a/xcm/xcm-builder/src/currency_adapter.rs +++ b/xcm/xcm-builder/src/currency_adapter.rs @@ -44,8 +44,8 @@ impl From for XcmError { } } -/// Simple adapter to use a currency as asset transactor. This type can be used as `type AssetTransactor` in -/// `xcm::Config`. +/// Simple adapter to use a currency as asset transactor. This type can be used as `type +/// AssetTransactor` in `xcm::Config`. /// /// # Example /// ``` diff --git a/xcm/xcm-builder/src/fungibles_adapter.rs b/xcm/xcm-builder/src/fungibles_adapter.rs index bcb0e9c870b3..d7fded01e2db 100644 --- a/xcm/xcm-builder/src/fungibles_adapter.rs +++ b/xcm/xcm-builder/src/fungibles_adapter.rs @@ -63,8 +63,8 @@ impl< /// The location which is allowed to mint a particular asset. #[derive(Copy, Clone, Eq, PartialEq)] pub enum MintLocation { - /// This chain is allowed to mint the asset. When we track teleports of the asset we ensure that - /// no more of the asset returns back to the chain than has been sent out. + /// This chain is allowed to mint the asset. When we track teleports of the asset we ensure + /// that no more of the asset returns back to the chain than has been sent out. Local, /// This chain is not allowed to mint the asset. When we track teleports of the asset we ensure /// that no more of the asset is sent out from the chain than has been previously received. 
diff --git a/xcm/xcm-builder/src/location_conversion.rs b/xcm/xcm-builder/src/location_conversion.rs index ccc3cc040e61..26b48fc88adc 100644 --- a/xcm/xcm-builder/src/location_conversion.rs +++ b/xcm/xcm-builder/src/location_conversion.rs @@ -345,10 +345,11 @@ impl>, AccountId: From<[u8; 20]> + Into<[u8; 20]> } } -/// Converts a location which is a top-level relay chain (which provides its own consensus) into a 32-byte `AccountId`. +/// Converts a location which is a top-level relay chain (which provides its own consensus) into a +/// 32-byte `AccountId`. /// -/// This will always result in the *same account ID* being returned for the same Relay-chain, regardless of the relative security of -/// this Relay-chain compared to the local chain. +/// This will always result in the *same account ID* being returned for the same Relay-chain, +/// regardless of the relative security of this Relay-chain compared to the local chain. /// /// Note: No distinction is made between the cases when the given `UniversalLocation` lies within /// the same consensus system (i.e. is itself or a parent) and when it is a foreign consensus diff --git a/xcm/xcm-builder/src/origin_aliases.rs b/xcm/xcm-builder/src/origin_aliases.rs index 12bcdad3dfea..82c5f71b7a12 100644 --- a/xcm/xcm-builder/src/origin_aliases.rs +++ b/xcm/xcm-builder/src/origin_aliases.rs @@ -20,7 +20,8 @@ use frame_support::traits::{Contains, ContainsPair}; use sp_std::marker::PhantomData; use xcm::latest::prelude::*; -/// Alias a Foreign `AccountId32` with a local `AccountId32` if the foreign `AccountId32` matches the `Prefix` pattern. +/// Alias a Foreign `AccountId32` with a local `AccountId32` if the foreign `AccountId32` matches +/// the `Prefix` pattern. /// /// Requires that the prefixed origin `AccountId32` matches the target `AccountId32`. 
pub struct AliasForeignAccountId32(PhantomData); diff --git a/xcm/xcm-builder/src/origin_conversion.rs b/xcm/xcm-builder/src/origin_conversion.rs index 0810b1ce2f8b..112b26869a99 100644 --- a/xcm/xcm-builder/src/origin_conversion.rs +++ b/xcm/xcm-builder/src/origin_conversion.rs @@ -24,7 +24,8 @@ use sp_std::marker::PhantomData; use xcm::latest::{BodyId, BodyPart, Junction, Junctions::*, MultiLocation, NetworkId, OriginKind}; use xcm_executor::traits::{ConvertLocation, ConvertOrigin}; -/// Sovereign accounts use the system's `Signed` origin with an account ID derived from the `LocationConverter`. +/// Sovereign accounts use the system's `Signed` origin with an account ID derived from the +/// `LocationConverter`. pub struct SovereignSignedViaLocation( PhantomData<(LocationConverter, RuntimeOrigin)>, ); @@ -269,10 +270,11 @@ where } } -/// `Convert` implementation to convert from some a `Signed` (system) `Origin` into an `AccountId32`. +/// `Convert` implementation to convert from some a `Signed` (system) `Origin` into an +/// `AccountId32`. /// -/// Typically used when configuring `pallet-xcm` for allowing normal accounts to dispatch an XCM from an `AccountId32` -/// origin. +/// Typically used when configuring `pallet-xcm` for allowing normal accounts to dispatch an XCM +/// from an `AccountId32` origin. pub struct SignedToAccountId32( PhantomData<(RuntimeOrigin, AccountId, Network)>, ); @@ -296,11 +298,11 @@ where } } -/// `Convert` implementation to convert from some an origin which implements `Backing` into a corresponding `Plurality` -/// `MultiLocation`. +/// `Convert` implementation to convert from some an origin which implements `Backing` into a +/// corresponding `Plurality` `MultiLocation`. /// -/// Typically used when configuring `pallet-xcm` for allowing a collective's Origin to dispatch an XCM from a -/// `Plurality` origin. 
+/// Typically used when configuring `pallet-xcm` for allowing a collective's Origin to dispatch an +/// XCM from a `Plurality` origin. pub struct BackingToPlurality( PhantomData<(RuntimeOrigin, COrigin, Body)>, ); diff --git a/xcm/xcm-builder/src/tests/assets.rs b/xcm/xcm-builder/src/tests/assets.rs index 9b8ba0e459de..dbcb731a1bda 100644 --- a/xcm/xcm-builder/src/tests/assets.rs +++ b/xcm/xcm-builder/src/tests/assets.rs @@ -396,7 +396,8 @@ fn max_assets_limit_should_work() { ); assert_eq!(r, Outcome::Incomplete(Weight::from_parts(95, 95), XcmError::HoldingWouldOverflow)); - // Attempt to withdraw 4 different assets and then the same 4 and then a different 4 will succeed. + // Attempt to withdraw 4 different assets and then the same 4 and then a different 4 will + // succeed. let message = Xcm(vec![ WithdrawAsset(([1u8; 32], 100u128).into()), WithdrawAsset(([2u8; 32], 100u128).into()), diff --git a/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs b/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs index 2f9bfcc2d80a..6870413c38d5 100644 --- a/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs +++ b/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs @@ -80,7 +80,8 @@ fn sending_to_bridged_chain_works() { )]; assert_eq!(take_received_remote_messages(), expected); - // The export cost 50 ref time and 50 proof size weight units (and thus 100 units of balance). + // The export cost 50 ref time and 50 proof size weight units (and thus 100 units of + // balance). assert_eq!(asset_list(Parachain(100)), vec![(Here, 1000u128 - price).into()]); let entry = LogEntry { @@ -154,7 +155,8 @@ fn sending_to_parachain_of_bridged_chain_works() { )]; assert_eq!(take_received_remote_messages(), expected); - // The export cost 50 ref time and 50 proof size weight units (and thus 100 units of balance). + // The export cost 50 ref time and 50 proof size weight units (and thus 100 units of + // balance). 
assert_eq!(asset_list(Parachain(100)), vec![(Here, 1000u128 - price).into()]); let entry = LogEntry { diff --git a/xcm/xcm-builder/src/tests/mock.rs b/xcm/xcm-builder/src/tests/mock.rs index 66a676369a67..aea780b84367 100644 --- a/xcm/xcm-builder/src/tests/mock.rs +++ b/xcm/xcm-builder/src/tests/mock.rs @@ -60,8 +60,8 @@ pub enum TestOrigin { /// A dummy call. /// -/// Each item contains the amount of weight that it *wants* to consume as the first item, and the actual amount (if -/// different from the former) in the second option. +/// Each item contains the amount of weight that it *wants* to consume as the first item, and the +/// actual amount (if different from the former) in the second option. #[derive(Debug, Encode, Decode, Eq, PartialEq, Clone, Copy, scale_info::TypeInfo)] pub enum TestCall { OnlyRoot(Weight, Option), diff --git a/xcm/xcm-builder/src/tests/querying.rs b/xcm/xcm-builder/src/tests/querying.rs index be8edfe87b8d..8fbb55eb2542 100644 --- a/xcm/xcm-builder/src/tests/querying.rs +++ b/xcm/xcm-builder/src/tests/querying.rs @@ -95,7 +95,8 @@ fn pallet_query_with_results_should_work() { #[test] fn prepaid_result_of_query_should_get_free_execution() { let query_id = 33; - // We put this in manually here, but normally this would be done at the point of crafting the message. + // We put this in manually here, but normally this would be done at the point of crafting the + // message. expect_response(query_id, Parent.into()); let the_response = Response::Assets((Parent, 100u128).into()); diff --git a/xcm/xcm-builder/src/universal_exports.rs b/xcm/xcm-builder/src/universal_exports.rs index 9a65ec7dfe40..0ee627e0ee90 100644 --- a/xcm/xcm-builder/src/universal_exports.rs +++ b/xcm/xcm-builder/src/universal_exports.rs @@ -300,7 +300,8 @@ pub trait HaulBlob { #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum HaulBlobError { - /// Represents point-to-point link failure with a human-readable explanation of the specific issue is provided. 
+ /// Represents point-to-point link failure with a human-readable explanation of the specific + /// issue is provided. Transport(&'static str), } @@ -361,8 +362,9 @@ impl< message.try_into().map_err(|_| DispatchBlobError::UnsupportedXcmVersion)?; // Prepend our bridge instance discriminator. - // Can be used for fine-grained control of origin on destination in case of multiple bridge instances, - // e.g. restrict `type UniversalAliases` and `UniversalOrigin` instruction to trust just particular bridge instance for `NetworkId`. + // Can be used for fine-grained control of origin on destination in case of multiple bridge + // instances, e.g. restrict `type UniversalAliases` and `UniversalOrigin` instruction to + // trust just particular bridge instance for `NetworkId`. if let Some(bridge_instance) = OurPlaceBridgeInstance::get() { message.0.insert(0, DescendOrigin(bridge_instance)); } diff --git a/xcm/xcm-builder/src/weight.rs b/xcm/xcm-builder/src/weight.rs index 73cba6cb557b..f1c14a4c6517 100644 --- a/xcm/xcm-builder/src/weight.rs +++ b/xcm/xcm-builder/src/weight.rs @@ -114,8 +114,9 @@ where } } -/// Function trait for handling some revenue. Similar to a negative imbalance (credit) handler, but for a -/// `MultiAsset`. Sensible implementations will deposit the asset in some known treasury or block-author account. +/// Function trait for handling some revenue. Similar to a negative imbalance (credit) handler, but +/// for a `MultiAsset`. Sensible implementations will deposit the asset in some known treasury or +/// block-author account. pub trait TakeRevenue { /// Do something with the given `revenue`, which is a single non-wildcard `MultiAsset`. 
fn take_revenue(revenue: MultiAsset); diff --git a/xcm/xcm-builder/tests/scenarios.rs b/xcm/xcm-builder/tests/scenarios.rs index e587c4118e74..3e735720aa76 100644 --- a/xcm/xcm-builder/tests/scenarios.rs +++ b/xcm/xcm-builder/tests/scenarios.rs @@ -101,8 +101,8 @@ fn transfer_asset_works() { /// A parachain wants to be notified that a transfer worked correctly. /// It includes a `QueryHolding` order after the deposit to get notified on success. /// This somewhat abuses `QueryHolding` as an indication of execution success. It works because -/// order execution halts on error (so no `QueryResponse` will be sent if the previous order failed). -/// The inner response sent due to the query is not used. +/// order execution halts on error (so no `QueryResponse` will be sent if the previous order +/// failed). The inner response sent due to the query is not used. /// /// Asserts that the balances are updated correctly and the expected XCM is sent. #[test] diff --git a/xcm/xcm-executor/src/assets.rs b/xcm/xcm-executor/src/assets.rs index f5e0659931eb..d8d8936df331 100644 --- a/xcm/xcm-executor/src/assets.rs +++ b/xcm/xcm-executor/src/assets.rs @@ -132,15 +132,17 @@ impl Assets { /// Mutate `self` to contain all given `assets`, saturating if necessary. /// - /// NOTE: [`Assets`] are always sorted, allowing us to optimize this function from `O(n^2)` to `O(n)`. + /// NOTE: [`Assets`] are always sorted, allowing us to optimize this function from `O(n^2)` to + /// `O(n)`. pub fn subsume_assets(&mut self, mut assets: Assets) { let mut f_iter = assets.fungible.iter_mut(); let mut g_iter = self.fungible.iter_mut(); if let (Some(mut f), Some(mut g)) = (f_iter.next(), g_iter.next()) { loop { if f.0 == g.0 { - // keys are equal. in this case, we add `self`'s balance for the asset onto `assets`, balance, knowing - // that the `append` operation which follows will clobber `self`'s value and only use `assets`'s. + // keys are equal. 
in this case, we add `self`'s balance for the asset onto + // `assets`, balance, knowing that the `append` operation which follows will + // clobber `self`'s value and only use `assets`'s. (*f.1).saturating_accrue(*g.1); } if f.0 <= g.0 { @@ -186,8 +188,9 @@ impl Assets { /// Alter any concretely identified assets by prepending the given `MultiLocation`. /// - /// WARNING: For now we consider this infallible and swallow any errors. It is thus the caller's responsibility to - /// ensure that any internal asset IDs are able to be prepended without overflow. + /// WARNING: For now we consider this infallible and swallow any errors. It is thus the caller's + /// responsibility to ensure that any internal asset IDs are able to be prepended without + /// overflow. pub fn prepend_location(&mut self, prepend: &MultiLocation) { let mut fungible = Default::default(); mem::swap(&mut self.fungible, &mut fungible); @@ -269,8 +272,8 @@ impl Assets { self.non_fungible.is_superset(&assets.non_fungible) } - /// Returns an error unless all `assets` are contained in `self`. In the case of an error, the first asset in - /// `assets` which is not wholly in `self` is returned. + /// Returns an error unless all `assets` are contained in `self`. In the case of an error, the + /// first asset in `assets` which is not wholly in `self` is returned. pub fn ensure_contains(&self, assets: &MultiAssets) -> Result<(), TakeError> { for asset in assets.inner().iter() { match asset { @@ -292,16 +295,17 @@ impl Assets { /// Mutates `self` to its original value less `mask` and returns assets that were removed. /// - /// If `saturate` is `true`, then `self` is considered to be masked by `mask`, thereby avoiding any attempt at - /// reducing it by assets it does not contain. In this case, the function is infallible. If `saturate` is `false` - /// and `mask` references a definite asset which `self` does not contain then an error is returned. 
+ /// If `saturate` is `true`, then `self` is considered to be masked by `mask`, thereby avoiding + /// any attempt at reducing it by assets it does not contain. In this case, the function is + /// infallible. If `saturate` is `false` and `mask` references a definite asset which `self` + /// does not contain then an error is returned. /// /// The number of unique assets which are removed will respect the `count` parameter in the /// counted wildcard variants. /// - /// Returns `Ok` with the definite assets token from `self` and mutates `self` to its value minus - /// `mask`. Returns `Err` in the non-saturating case where `self` did not contain (enough of) a definite asset to - /// be removed. + /// Returns `Ok` with the definite assets token from `self` and mutates `self` to its value + /// minus `mask`. Returns `Err` in the non-saturating case where `self` did not contain (enough + /// of) a definite asset to be removed. fn general_take( &mut self, mask: MultiAssetFilter, @@ -386,24 +390,27 @@ impl Assets { Ok(taken) } - /// Mutates `self` to its original value less `mask` and returns `true` iff it contains at least `mask`. + /// Mutates `self` to its original value less `mask` and returns `true` iff it contains at least + /// `mask`. /// - /// Returns `Ok` with the non-wildcard equivalence of `mask` taken and mutates `self` to its value minus - /// `mask` if `self` contains `asset`, and return `Err` otherwise. + /// Returns `Ok` with the non-wildcard equivalence of `mask` taken and mutates `self` to its + /// value minus `mask` if `self` contains `asset`, and return `Err` otherwise. pub fn saturating_take(&mut self, asset: MultiAssetFilter) -> Assets { self.general_take(asset, true) .expect("general_take never results in error when saturating") } - /// Mutates `self` to its original value less `mask` and returns `true` iff it contains at least `mask`. + /// Mutates `self` to its original value less `mask` and returns `true` iff it contains at least + /// `mask`. 
/// - /// Returns `Ok` with the non-wildcard equivalence of `asset` taken and mutates `self` to its value minus - /// `asset` if `self` contains `asset`, and return `Err` otherwise. + /// Returns `Ok` with the non-wildcard equivalence of `asset` taken and mutates `self` to its + /// value minus `asset` if `self` contains `asset`, and return `Err` otherwise. pub fn try_take(&mut self, mask: MultiAssetFilter) -> Result { self.general_take(mask, false) } - /// Consumes `self` and returns its original value excluding `asset` iff it contains at least `asset`. + /// Consumes `self` and returns its original value excluding `asset` iff it contains at least + /// `asset`. pub fn checked_sub(mut self, asset: MultiAsset) -> Result { match asset.fun { Fungible(amount) => { diff --git a/xcm/xcm-executor/src/lib.rs b/xcm/xcm-executor/src/lib.rs index 57ddc4322923..a48cd3259d67 100644 --- a/xcm/xcm-executor/src/lib.rs +++ b/xcm/xcm-executor/src/lib.rs @@ -356,7 +356,8 @@ impl XcmExecutor { } /// Execute any final operations after having executed the XCM message. - /// This includes refunding surplus weight, trapping extra holding funds, and returning any errors during execution. + /// This includes refunding surplus weight, trapping extra holding funds, and returning any + /// errors during execution. pub fn post_process(mut self, xcm_weight: Weight) -> Outcome { // We silently drop any error from our attempt to refund the surplus as it's a charitable // thing so best-effort is all we will do. @@ -533,9 +534,10 @@ impl XcmExecutor { Config::IsTeleporter::contains(asset, &origin), XcmError::UntrustedTeleportLocation ); - // We should check that the asset can actually be teleported in (for this to be in error, there - // would need to be an accounting violation by one of the trusted chains, so it's unlikely, but we - // don't want to punish a possibly innocent chain/user). 
+ // We should check that the asset can actually be teleported in (for this to be + // in error, there would need to be an accounting violation by one of the + // trusted chains, so it's unlikely, but we don't want to punish a possibly + // innocent chain/user). Config::AssetTransactor::can_check_in(&origin, asset, &self.context)?; } for asset in assets.into_inner().into_iter() { @@ -603,8 +605,8 @@ impl XcmExecutor { Ok(()) }, ReportError(response_info) => { - // Report the given result by sending a QueryResponse XCM to a previously given outcome - // destination if one was registered. + // Report the given result by sending a QueryResponse XCM to a previously given + // outcome destination if one was registered. self.respond( self.cloned_origin(), Response::ExecutionResult(self.error), @@ -823,10 +825,12 @@ impl XcmExecutor { Ok(()) }, ExportMessage { network, destination, xcm } => { - // The actual message sent to the bridge for forwarding is prepended with `UniversalOrigin` - // and `DescendOrigin` in order to ensure that the message is executed with this Origin. + // The actual message sent to the bridge for forwarding is prepended with + // `UniversalOrigin` and `DescendOrigin` in order to ensure that the message is + // executed with this Origin. // - // Prepend the desired message with instructions which effectively rewrite the origin. + // Prepend the desired message with instructions which effectively rewrite the + // origin. // // This only works because the remote chain empowers the bridge // to speak for the local network. diff --git a/xcm/xcm-executor/src/traits/asset_exchange.rs b/xcm/xcm-executor/src/traits/asset_exchange.rs index 465468992ae4..0cb188d348de 100644 --- a/xcm/xcm-executor/src/traits/asset_exchange.rs +++ b/xcm/xcm-executor/src/traits/asset_exchange.rs @@ -24,8 +24,8 @@ pub trait AssetExchange { /// - `origin`: The location attempting the exchange; this should generally not matter. 
/// - `give`: The assets which have been removed from the caller. /// - `want`: The minimum amount of assets which should be given to the caller in case any - /// exchange happens. If more assets are provided, then they should generally be of the - /// same asset class if at all possible. + /// exchange happens. If more assets are provided, then they should generally be of the same + /// asset class if at all possible. /// - `maximal`: If `true`, then as much as possible should be exchanged. /// /// `Ok` is returned along with the new set of assets which have been exchanged for `give`. At diff --git a/xcm/xcm-executor/src/traits/asset_lock.rs b/xcm/xcm-executor/src/traits/asset_lock.rs index bb19e90b0c36..b5a2b22f5fc5 100644 --- a/xcm/xcm-executor/src/traits/asset_lock.rs +++ b/xcm/xcm-executor/src/traits/asset_lock.rs @@ -69,8 +69,8 @@ pub trait AssetLock { /// unlock. type UnlockTicket: Enact; - /// `Enact` implementer for `prepare_reduce_unlockable`. This type may be dropped safely to avoid doing the - /// unlock. + /// `Enact` implementer for `prepare_reduce_unlockable`. This type may be dropped safely to + /// avoid doing the unlock. type ReduceTicket: Enact; /// Prepare to lock an asset. On success, a `Self::LockTicket` it returned, which can be used diff --git a/xcm/xcm-executor/src/traits/conversion.rs b/xcm/xcm-executor/src/traits/conversion.rs index 2f584a900f69..dac099ffaf8e 100644 --- a/xcm/xcm-executor/src/traits/conversion.rs +++ b/xcm/xcm-executor/src/traits/conversion.rs @@ -40,9 +40,9 @@ impl ConvertLocation for Tuple { /// A converter `trait` for origin types. /// -/// Can be amalgamated into tuples. If any of the tuple elements returns `Ok(_)`, it short circuits. Else, the `Err(_)` -/// of the last tuple item is returned. Each intermediate `Err(_)` might return a different `origin` of type `Origin` -/// which is passed to the next convert item. +/// Can be amalgamated into tuples. If any of the tuple elements returns `Ok(_)`, it short circuits. 
+/// Else, the `Err(_)` of the last tuple item is returned. Each intermediate `Err(_)` might return a +/// different `origin` of type `Origin` which is passed to the next convert item. /// /// ```rust /// # use xcm::latest::{MultiLocation, Junctions, Junction, OriginKind}; diff --git a/xcm/xcm-executor/src/traits/filter_asset_location.rs b/xcm/xcm-executor/src/traits/filter_asset_location.rs index 7aeb26b28094..b162a8b0729d 100644 --- a/xcm/xcm-executor/src/traits/filter_asset_location.rs +++ b/xcm/xcm-executor/src/traits/filter_asset_location.rs @@ -19,7 +19,8 @@ use xcm::latest::{MultiAsset, MultiLocation}; /// Filters assets/location pairs. /// -/// Can be amalgamated into tuples. If any item returns `true`, it short-circuits, else `false` is returned. +/// Can be amalgamated into tuples. If any item returns `true`, it short-circuits, else `false` is +/// returned. #[deprecated = "Use `frame_support::traits::ContainsPair` instead"] pub trait FilterAssetLocation { /// A filter to distinguish between asset/location pairs. diff --git a/xcm/xcm-executor/src/traits/on_response.rs b/xcm/xcm-executor/src/traits/on_response.rs index 34bb7eb9597d..b0f8b35bb98f 100644 --- a/xcm/xcm-executor/src/traits/on_response.rs +++ b/xcm/xcm-executor/src/traits/on_response.rs @@ -107,11 +107,14 @@ impl VersionChangeNotifier for () { /// The possible state of an XCM query response. #[derive(Debug, PartialEq, Eq)] pub enum QueryResponseStatus { - /// The response has arrived, and includes the inner Response and the block number it arrived at. + /// The response has arrived, and includes the inner Response and the block number it arrived + /// at. Ready { response: Response, at: BlockNumber }, - /// The response has not yet arrived, the XCM might still be executing or the response might be in transit. + /// The response has not yet arrived, the XCM might still be executing or the response might be + /// in transit. 
Pending { timeout: BlockNumber }, - /// No response with the given `QueryId` was found, or the response was already queried and removed from local storage. + /// No response with the given `QueryId` was found, or the response was already queried and + /// removed from local storage. NotFound, /// Got an unexpected XCM version. UnexpectedVersion, @@ -144,7 +147,8 @@ pub trait QueryHandler { /// /// - `message`: The message whose outcome should be reported. /// - `responder`: The origin from which a response should be expected. - /// - `timeout`: The block number after which it is permissible to return `NotFound` from `take_response`. + /// - `timeout`: The block number after which it is permissible to return `NotFound` from + /// `take_response`. /// /// `report_outcome` may return an error if the `responder` is not invertible. /// diff --git a/xcm/xcm-executor/src/traits/should_execute.rs b/xcm/xcm-executor/src/traits/should_execute.rs index 2b634e375136..d85458b54709 100644 --- a/xcm/xcm-executor/src/traits/should_execute.rs +++ b/xcm/xcm-executor/src/traits/should_execute.rs @@ -32,8 +32,8 @@ pub struct Properties { /// Trait to determine whether the execution engine should actually execute a given XCM. /// -/// Can be amalgamated into a tuple to have multiple trials. If any of the tuple elements returns `Ok()`, the -/// execution stops. Else, `Err(_)` is returned if all elements reject the message. +/// Can be amalgamated into a tuple to have multiple trials. If any of the tuple elements returns +/// `Ok()`, the execution stops. Else, `Err(_)` is returned if all elements reject the message. pub trait ShouldExecute { /// Returns `true` if the given `message` may be executed. 
/// diff --git a/xcm/xcm-executor/src/traits/transact_asset.rs b/xcm/xcm-executor/src/traits/transact_asset.rs index 832397a0fd25..34cdb0c71413 100644 --- a/xcm/xcm-executor/src/traits/transact_asset.rs +++ b/xcm/xcm-executor/src/traits/transact_asset.rs @@ -20,11 +20,13 @@ use xcm::latest::{Error as XcmError, MultiAsset, MultiLocation, Result as XcmRes /// Facility for asset transacting. /// -/// This should work with as many asset/location combinations as possible. Locations to support may include non-account -/// locations such as a `MultiLocation::X1(Junction::Parachain)`. Different chains may handle them in different ways. +/// This should work with as many asset/location combinations as possible. Locations to support may +/// include non-account locations such as a `MultiLocation::X1(Junction::Parachain)`. Different +/// chains may handle them in different ways. /// -/// Can be amalgamated as a tuple of items that implement this trait. In such executions, if any of the transactors -/// returns `Ok(())`, then it will short circuit. Else, execution is passed to the next transactor. +/// Can be amalgamated as a tuple of items that implement this trait. In such executions, if any of +/// the transactors returns `Ok(())`, then it will short circuit. Else, execution is passed to the +/// next transactor. pub trait TransactAsset { /// Ensure that `check_in` will do as expected. /// @@ -37,19 +39,23 @@ pub trait TransactAsset { Err(XcmError::Unimplemented) } - /// An asset has been teleported in from the given origin. This should do whatever housekeeping is needed. + /// An asset has been teleported in from the given origin. This should do whatever housekeeping + /// is needed. /// - /// NOTE: This will make only a best-effort at bookkeeping. The caller should ensure that `can_check_in` has - /// returned with `Ok` in order to guarantee that this operation proceeds properly. + /// NOTE: This will make only a best-effort at bookkeeping. 
The caller should ensure that + /// `can_check_in` has returned with `Ok` in order to guarantee that this operation proceeds + /// properly. /// - /// Implementation note: In general this will do one of two things: On chains where the asset is native, - /// it will reduce the assets from a special "teleported" account so that a) total-issuance is preserved; - /// and b) to ensure that no more assets can be teleported in than were teleported out overall (this should - /// not be needed if the teleporting chains are to be trusted, but better to be safe than sorry). On chains - /// where the asset is not native then it will generally just be a no-op. + /// Implementation note: In general this will do one of two things: On chains where the asset is + /// native, it will reduce the assets from a special "teleported" account so that a) + /// total-issuance is preserved; and b) to ensure that no more assets can be teleported in than + /// were teleported out overall (this should not be needed if the teleporting chains are to be + /// trusted, but better to be safe than sorry). On chains where the asset is not native then it + /// will generally just be a no-op. /// - /// When composed as a tuple, all type-items are called. It is up to the implementer that there exists no - /// value for `_what` which can cause side-effects for more than one of the type-items. + /// When composed as a tuple, all type-items are called. It is up to the implementer that there + /// exists no value for `_what` which can cause side-effects for more than one of the + /// type-items. fn check_in(_origin: &MultiLocation, _what: &MultiAsset, _context: &XcmContext) {} /// Ensure that `check_out` will do as expected. @@ -63,16 +69,19 @@ pub trait TransactAsset { Err(XcmError::Unimplemented) } - /// An asset has been teleported out to the given destination. This should do whatever housekeeping is needed. + /// An asset has been teleported out to the given destination. 
This should do whatever + /// housekeeping is needed. /// - /// Implementation note: In general this will do one of two things: On chains where the asset is native, - /// it will increase the assets in a special "teleported" account so that a) total-issuance is preserved; and - /// b) to ensure that no more assets can be teleported in than were teleported out overall (this should not - /// be needed if the teleporting chains are to be trusted, but better to be safe than sorry). On chains where - /// the asset is not native then it will generally just be a no-op. + /// Implementation note: In general this will do one of two things: On chains where the asset is + /// native, it will increase the assets in a special "teleported" account so that a) + /// total-issuance is preserved; and b) to ensure that no more assets can be teleported in than + /// were teleported out overall (this should not be needed if the teleporting chains are to be + /// trusted, but better to be safe than sorry). On chains where the asset is not native then it + /// will generally just be a no-op. /// - /// When composed as a tuple, all type-items are called. It is up to the implementer that there exists no - /// value for `_what` which can cause side-effects for more than one of the type-items. + /// When composed as a tuple, all type-items are called. It is up to the implementer that there + /// exists no value for `_what` which can cause side-effects for more than one of the + /// type-items. fn check_out(_dest: &MultiLocation, _what: &MultiAsset, _context: &XcmContext) {} /// Deposit the `what` asset into the account of `who`. diff --git a/xcm/xcm-executor/src/traits/weight.rs b/xcm/xcm-executor/src/traits/weight.rs index 06e6b5f55bce..bc40c10074f5 100644 --- a/xcm/xcm-executor/src/traits/weight.rs +++ b/xcm/xcm-executor/src/traits/weight.rs @@ -56,8 +56,8 @@ pub trait WeightTrader: Sized { context: &XcmContext, ) -> Result; - /// Attempt a refund of `weight` into some asset. 
The caller does not guarantee that the weight was - /// purchased using `buy_weight`. + /// Attempt a refund of `weight` into some asset. The caller does not guarantee that the weight + /// was purchased using `buy_weight`. /// /// Default implementation refunds nothing. fn refund_weight(&mut self, _weight: Weight, _context: &XcmContext) -> Option { @@ -93,8 +93,8 @@ impl WeightTrader for Tuple { log::trace!(target: "xcm::buy_weight", "last_error: {:?}, too_expensive_error_found: {}", last_error, too_expensive_error_found); - // if we have multiple traders, and first one returns `TooExpensive` and others fail e.g. `AssetNotFound` - // then it is more accurate to return `TooExpensive` then `AssetNotFound` + // if we have multiple traders, and first one returns `TooExpensive` and others fail e.g. + // `AssetNotFound` then it is more accurate to return `TooExpensive` then `AssetNotFound` Err(if too_expensive_error_found { XcmError::TooExpensive } else { diff --git a/xcm/xcm-simulator/src/lib.rs b/xcm/xcm-simulator/src/lib.rs index f98eb6e571e6..cf56784f7d4e 100644 --- a/xcm/xcm-simulator/src/lib.rs +++ b/xcm/xcm-simulator/src/lib.rs @@ -161,12 +161,12 @@ macro_rules! decl_test_relay_chain { /// /// ```ignore /// decl_test_parachain! { -/// pub struct ParaA { -/// Runtime = parachain::Runtime, -/// XcmpMessageHandler = parachain::MsgQueue, -/// DmpMessageHandler = parachain::MsgQueue, -/// new_ext = para_ext(), -/// } +/// pub struct ParaA { +/// Runtime = parachain::Runtime, +/// XcmpMessageHandler = parachain::MsgQueue, +/// DmpMessageHandler = parachain::MsgQueue, +/// new_ext = para_ext(), +/// } /// } /// ``` #[macro_export] @@ -272,13 +272,13 @@ thread_local! { /// /// ```ignore /// decl_test_network! 
{ -/// pub struct ExampleNet { -/// relay_chain = Relay, -/// parachains = vec![ -/// (1, ParaA), -/// (2, ParaB), -/// ], -/// } +/// pub struct ExampleNet { +/// relay_chain = Relay, +/// parachains = vec![ +/// (1, ParaA), +/// (2, ParaB), +/// ], +/// } /// } /// ``` #[macro_export] From ffb8d15d6ff0e382fbca5d332f46657d0e8a71b1 Mon Sep 17 00:00:00 2001 From: Aaro Altonen <48052676+altonen@users.noreply.github.com> Date: Mon, 14 Aug 2023 17:34:38 +0300 Subject: [PATCH 31/35] Disable validation/collation protocols for normal full nodes (#7601) If authority discovery is not enabled, `Overseer` is not enabled, meaning `NetworkBridge` is not started. Validation/collation protocols are, however, enabled even if the `NetworkBridge` is not started. Currently this results in normal Polkadot full nodes advertising these protocols, accepting inbound substreams and even establishing outbound substreams for the validation protocol. Since the `NetworkBridge` is not started and no protocol in Substrate is interested in these protocol events, the events are relayed to all protocol handlers but are getting discarded because no installed protocol is interested in them. 
Co-authored-by: parity-processbot <> --- node/service/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index 4dda57110825..d0b6db17ed0e 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -828,10 +828,11 @@ pub fn new_full( net_config.add_request_response_protocol(beefy_req_resp_cfg); } + // validation/collation protocols are enabled only if `Overseer` is enabled let peerset_protocol_names = PeerSetProtocolNames::new(genesis_hash, config.chain_spec.fork_id()); - { + if auth_or_collator || overseer_enable_anyways { use polkadot_network_bridge::{peer_sets_info, IsAuthority}; let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No }; for config in peer_sets_info(is_authority, &peerset_protocol_names) { From 74b2fec195e17ff2e9003e12dd035ab9d1a989f5 Mon Sep 17 00:00:00 2001 From: Lulu Date: Mon, 14 Aug 2023 16:31:13 +0100 Subject: [PATCH 32/35] Don't publish test crates (#7588) --- node/subsystem-test-helpers/Cargo.toml | 1 + node/test/client/Cargo.toml | 1 + node/test/performance-test/Cargo.toml | 1 + node/test/service/Cargo.toml | 1 + parachain/test-parachains/adder/Cargo.toml | 1 + parachain/test-parachains/adder/collator/Cargo.toml | 1 + parachain/test-parachains/halt/Cargo.toml | 1 + parachain/test-parachains/undying/Cargo.toml | 1 + parachain/test-parachains/undying/collator/Cargo.toml | 1 + primitives/test-helpers/Cargo.toml | 1 + runtime/test-runtime/Cargo.toml | 1 + runtime/test-runtime/constants/Cargo.toml | 1 + utils/remote-ext-tests/bags-list/Cargo.toml | 1 + xcm/xcm-executor/integration-tests/Cargo.toml | 1 + 14 files changed, 14 insertions(+) diff --git a/node/subsystem-test-helpers/Cargo.toml b/node/subsystem-test-helpers/Cargo.toml index 81bc19a13031..adb0587370ec 100644 --- a/node/subsystem-test-helpers/Cargo.toml +++ b/node/subsystem-test-helpers/Cargo.toml @@ -1,6 +1,7 @@ [package] name = 
"polkadot-node-subsystem-test-helpers" description = "Subsystem traits and message definitions" +publish = false version.workspace = true authors.workspace = true edition.workspace = true diff --git a/node/test/client/Cargo.toml b/node/test/client/Cargo.toml index 33c240443d02..aac46bd4b8fc 100644 --- a/node/test/client/Cargo.toml +++ b/node/test/client/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "polkadot-test-client" +publish = false version.workspace = true authors.workspace = true edition.workspace = true diff --git a/node/test/performance-test/Cargo.toml b/node/test/performance-test/Cargo.toml index c6d0ce7f7ec9..1bddc6b08702 100644 --- a/node/test/performance-test/Cargo.toml +++ b/node/test/performance-test/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "polkadot-performance-test" +publish = false version.workspace = true authors.workspace = true edition.workspace = true diff --git a/node/test/service/Cargo.toml b/node/test/service/Cargo.toml index 08e9e3889b06..8912e19306e0 100644 --- a/node/test/service/Cargo.toml +++ b/node/test/service/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "polkadot-test-service" +publish = false version.workspace = true authors.workspace = true edition.workspace = true diff --git a/parachain/test-parachains/adder/Cargo.toml b/parachain/test-parachains/adder/Cargo.toml index 5e1b9a7d174c..d2b2224328a7 100644 --- a/parachain/test-parachains/adder/Cargo.toml +++ b/parachain/test-parachains/adder/Cargo.toml @@ -6,6 +6,7 @@ edition.workspace = true license.workspace = true version.workspace = true authors.workspace = true +publish = false [dependencies] parachain = { package = "polkadot-parachain", path = "../../", default-features = false, features = [ "wasm-api" ] } diff --git a/parachain/test-parachains/adder/collator/Cargo.toml b/parachain/test-parachains/adder/collator/Cargo.toml index 08dcbcaa644e..fad51a863a15 100644 --- a/parachain/test-parachains/adder/collator/Cargo.toml +++ b/parachain/test-parachains/adder/collator/Cargo.toml 
@@ -1,6 +1,7 @@ [package] name = "test-parachain-adder-collator" description = "Collator for the adder test parachain" +publish = false version.workspace = true authors.workspace = true edition.workspace = true diff --git a/parachain/test-parachains/halt/Cargo.toml b/parachain/test-parachains/halt/Cargo.toml index 99076aae6aa3..85ee5d99d891 100644 --- a/parachain/test-parachains/halt/Cargo.toml +++ b/parachain/test-parachains/halt/Cargo.toml @@ -2,6 +2,7 @@ name = "test-parachain-halt" description = "Test parachain which executes forever" build = "build.rs" +publish = false version.workspace = true authors.workspace = true edition.workspace = true diff --git a/parachain/test-parachains/undying/Cargo.toml b/parachain/test-parachains/undying/Cargo.toml index 43cb1bc37fda..030032e7754d 100644 --- a/parachain/test-parachains/undying/Cargo.toml +++ b/parachain/test-parachains/undying/Cargo.toml @@ -2,6 +2,7 @@ name = "test-parachain-undying" description = "Test parachain for zombienet integration tests" build = "build.rs" +publish = false version.workspace = true authors.workspace = true edition.workspace = true diff --git a/parachain/test-parachains/undying/collator/Cargo.toml b/parachain/test-parachains/undying/collator/Cargo.toml index 5b5656efb4ac..b0118555506c 100644 --- a/parachain/test-parachains/undying/collator/Cargo.toml +++ b/parachain/test-parachains/undying/collator/Cargo.toml @@ -5,6 +5,7 @@ edition.workspace = true license.workspace = true version.workspace = true authors.workspace = true +publish = false [[bin]] name = "undying-collator" diff --git a/primitives/test-helpers/Cargo.toml b/primitives/test-helpers/Cargo.toml index a1f7f9268b9f..b43bac1e8550 100644 --- a/primitives/test-helpers/Cargo.toml +++ b/primitives/test-helpers/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "polkadot-primitives-test-helpers" +publish = false version.workspace = true authors.workspace = true edition.workspace = true diff --git a/runtime/test-runtime/Cargo.toml 
b/runtime/test-runtime/Cargo.toml index 76bd63d59462..41fbebb39f3a 100644 --- a/runtime/test-runtime/Cargo.toml +++ b/runtime/test-runtime/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "polkadot-test-runtime" build = "build.rs" +publish = false version.workspace = true authors.workspace = true edition.workspace = true diff --git a/runtime/test-runtime/constants/Cargo.toml b/runtime/test-runtime/constants/Cargo.toml index 9b435da80682..15ab1dbdd4fe 100644 --- a/runtime/test-runtime/constants/Cargo.toml +++ b/runtime/test-runtime/constants/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "test-runtime-constants" +publish = false version.workspace = true authors.workspace = true edition.workspace = true diff --git a/utils/remote-ext-tests/bags-list/Cargo.toml b/utils/remote-ext-tests/bags-list/Cargo.toml index 772efb1eddd0..c84c95ab0498 100644 --- a/utils/remote-ext-tests/bags-list/Cargo.toml +++ b/utils/remote-ext-tests/bags-list/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "remote-ext-tests-bags-list" +publish = false version.workspace = true authors.workspace = true edition.workspace = true diff --git a/xcm/xcm-executor/integration-tests/Cargo.toml b/xcm/xcm-executor/integration-tests/Cargo.toml index d2af1304beb6..18a729e082d2 100644 --- a/xcm/xcm-executor/integration-tests/Cargo.toml +++ b/xcm/xcm-executor/integration-tests/Cargo.toml @@ -5,6 +5,7 @@ authors.workspace = true edition.workspace = true license.workspace = true version.workspace = true +publish = false [dependencies] frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } From 2a2393fa7f06be0986d6956ebf04f5b31d81cca5 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Mon, 14 Aug 2023 12:14:30 -0400 Subject: [PATCH 33/35] PVF workers: some fixes for cargo run and cargo install (#7608) - Update some places where `cargo run` was used - Add note to error messages about `cargo build` before `cargo run` - Fix call to `cargo install` in readme --- README.md | 7 
++++++- node/service/src/lib.rs | 4 ++-- parachain/test-parachains/adder/collator/README.md | 8 +++++++- utils/staking-miner/README.md | 2 ++ 4 files changed, 17 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index f3d1f5e276cd..c6e969760362 100644 --- a/README.md +++ b/README.md @@ -93,7 +93,11 @@ cargo build --release **Note:** compilation is a memory intensive process. We recommend having 4 GiB of physical RAM or swap available (keep in mind that if a build hits swap it tends to be very slow). -**Note:** if you want to move the built `polkadot` binary somewhere (e.g. into $PATH) you will also need to move `polkadot-execute-worker` and `polkadot-prepare-worker`. You can let cargo do all this for you by running `cargo install --path .`. +**Note:** if you want to move the built `polkadot` binary somewhere (e.g. into $PATH) you will also need to move `polkadot-execute-worker` and `polkadot-prepare-worker`. You can let cargo do all this for you by running: + +```sh +cargo install --path . --locked +``` #### Build from Source with Docker @@ -193,6 +197,7 @@ cargo test --workspace --release You can start a development chain with: ```bash +cargo build cargo run -- --dev ``` diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index d0b6db17ed0e..d42c737330cd 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -243,7 +243,7 @@ pub enum Error { InvalidWorkerBinaries { prep_worker_path: PathBuf, exec_worker_path: PathBuf }, #[cfg(feature = "full-node")] - #[error("Worker binaries could not be found, make sure polkadot was built/installed correctly. Searched given workers path ({given_workers_path:?}), polkadot binary path ({current_exe_path:?}), and lib path (/usr/lib/polkadot), workers names: {workers_names:?}")] + #[error("Worker binaries could not be found, make sure polkadot was built/installed correctly. If you ran with `cargo run`, please run `cargo build` first. 
Searched given workers path ({given_workers_path:?}), polkadot binary path ({current_exe_path:?}), and lib path (/usr/lib/polkadot), workers names: {workers_names:?}")] MissingWorkerBinaries { given_workers_path: Option, current_exe_path: PathBuf, @@ -251,7 +251,7 @@ pub enum Error { }, #[cfg(feature = "full-node")] - #[error("Version of worker binary ({worker_version}) is different from node version ({node_version}), worker_path: {worker_path}. TESTING ONLY: this check can be disabled with --disable-worker-version-check")] + #[error("Version of worker binary ({worker_version}) is different from node version ({node_version}), worker_path: {worker_path}. If you ran with `cargo run`, please run `cargo build` first, otherwise try to `cargo clean`. TESTING ONLY: this check can be disabled with --disable-worker-version-check")] WorkerBinaryVersionMismatch { worker_version: String, node_version: String, diff --git a/parachain/test-parachains/adder/collator/README.md b/parachain/test-parachains/adder/collator/README.md index 4347a9a8ced7..a1378544c386 100644 --- a/parachain/test-parachains/adder/collator/README.md +++ b/parachain/test-parachains/adder/collator/README.md @@ -1,6 +1,12 @@ # How to run this collator -First start two validators that will run for the relay chain: +First, build Polkadot: + +```sh +cargo build --release +``` + +Then start two validators that will run for the relay chain: ```sh cargo run --release -- -d alice --chain rococo-local --validator --alice --port 50551 diff --git a/utils/staking-miner/README.md b/utils/staking-miner/README.md index b7f70de573b0..7e7254dc7759 100644 --- a/utils/staking-miner/README.md +++ b/utils/staking-miner/README.md @@ -64,5 +64,7 @@ docker run --rm -i \ ### Test locally +Make sure you've built Polkadot, then: + 1. `cargo run -p polkadot --features fast-runtime -- --chain polkadot-dev --tmp --alice -lruntime=debug` 2. 
`cargo run -p staking-miner -- --uri ws://localhost:9944 monitor --seed-or-path //Alice phrag-mms` From ed8f0f82433fa76153b4bcf169e4cc30d04e7ff5 Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Mon, 14 Aug 2023 14:30:12 -0700 Subject: [PATCH 34/35] XCM: Rename Instruction instructions to Command instructions (#7593) Co-authored-by: parity-processbot <> --- xcm/src/v2/mod.rs | 44 +++++++++++++------------- xcm/src/v3/mod.rs | 80 +++++++++++++++++++++++------------------------ 2 files changed, 62 insertions(+), 62 deletions(-) diff --git a/xcm/src/v2/mod.rs b/xcm/src/v2/mod.rs index 79cc8ead89a1..8a67b771c9e9 100644 --- a/xcm/src/v2/mod.rs +++ b/xcm/src/v2/mod.rs @@ -434,7 +434,7 @@ pub enum Instruction { /// /// - `assets`: The asset(s) to be withdrawn into holding. /// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: WithdrawAsset(MultiAssets), @@ -492,7 +492,7 @@ pub enum Instruction { /// /// Safety: No concerns. /// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: TransferAsset { assets: MultiAssets, beneficiary: MultiLocation }, @@ -512,7 +512,7 @@ pub enum Instruction { /// /// Safety: No concerns. /// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: TransferReserveAsset { assets: MultiAssets, dest: MultiLocation, xcm: Xcm<()> }, @@ -527,7 +527,7 @@ pub enum Instruction { /// /// Safety: No concerns. /// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: Transact { @@ -600,14 +600,14 @@ pub enum Instruction { /// /// Safety: No concerns. /// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: ClearOrigin, /// Mutate the origin to some interior location. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: DescendOrigin(InteriorMultiLocation), @@ -623,7 +623,7 @@ pub enum Instruction { /// is sent as a reply may take to execute. NOTE: If this is unexpectedly large then the /// response may not execute at all. 
/// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: ReportError { @@ -643,7 +643,7 @@ pub enum Instruction { /// removed, prioritized under standard asset ordering. Any others will remain in holding. /// - `beneficiary`: The new owner for the assets. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: DepositAsset { @@ -669,7 +669,7 @@ pub enum Instruction { /// - `xcm`: The orders that should follow the `ReserveAssetDeposited` instruction which is /// sent onwards to `dest`. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: DepositReserveAsset { @@ -689,7 +689,7 @@ pub enum Instruction { /// - `give`: The asset(s) to remove from holding. /// - `receive`: The minimum amount of assets(s) which `give` should be exchanged for. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: ExchangeAsset { give: MultiAssetFilter, receive: MultiAssets }, @@ -705,7 +705,7 @@ pub enum Instruction { /// - `xcm`: The instructions to execute on the assets once withdrawn *on the reserve /// location*. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: InitiateReserveWithdraw { assets: MultiAssetFilter, reserve: MultiLocation, xcm: Xcm<()> }, @@ -721,7 +721,7 @@ pub enum Instruction { /// NOTE: The `dest` location *MUST* respect this origin as a valid teleportation origin for /// all `assets`. If it does not, then the assets may be lost. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: InitiateTeleport { assets: MultiAssetFilter, dest: MultiLocation, xcm: Xcm<()> }, @@ -739,7 +739,7 @@ pub enum Instruction { /// is sent as a reply may take to execute. NOTE: If this is unexpectedly large then the /// response may not execute at all. 
/// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: QueryHolding { @@ -759,14 +759,14 @@ pub enum Instruction { /// expected maximum weight of the total XCM to be executed for the /// `AllowTopLevelPaidExecutionFrom` barrier to allow the XCM be executed. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: BuyExecution { fees: MultiAsset, weight_limit: WeightLimit }, /// Refund any surplus weight previously bought with `BuyExecution`. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: None. RefundSurplus, @@ -782,7 +782,7 @@ pub enum Instruction { /// weight however includes only the difference between the previous handler and the new /// handler, which can reasonably be negative, which would result in a surplus. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: None. SetErrorHandler(Xcm), @@ -798,14 +798,14 @@ pub enum Instruction { /// weight however includes only the difference between the previous appendix and the new /// appendix, which can reasonably be negative, which would result in a surplus. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: None. SetAppendix(Xcm), /// Clear the Error Register. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: None. ClearError, @@ -817,14 +817,14 @@ pub enum Instruction { /// - `ticket`: The ticket of the asset; this is an abstract identifier to help locate the /// asset. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: ClaimAsset { assets: MultiAssets, ticket: MultiLocation }, /// Always throws an error of type `Trap`. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: /// - `Trap`: All circumstances, whose inner value is the same as this item's inner value. @@ -839,7 +839,7 @@ pub enum Instruction { /// is sent as a reply may take to execute. NOTE: If this is unexpectedly large then the /// response may not execute at all. 
/// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: *Fallible* SubscribeVersion { @@ -851,7 +851,7 @@ pub enum Instruction { /// Cancel the effect of a previous `SubscribeVersion` instruction. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: *Fallible* UnsubscribeVersion, diff --git a/xcm/src/v3/mod.rs b/xcm/src/v3/mod.rs index 3614dc22550d..360867957862 100644 --- a/xcm/src/v3/mod.rs +++ b/xcm/src/v3/mod.rs @@ -380,7 +380,7 @@ pub enum Instruction { /// /// - `assets`: The asset(s) to be withdrawn into holding. /// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: WithdrawAsset(MultiAssets), @@ -444,7 +444,7 @@ pub enum Instruction { /// /// Safety: No concerns. /// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: TransferAsset { assets: MultiAssets, beneficiary: MultiLocation }, @@ -464,7 +464,7 @@ pub enum Instruction { /// /// Safety: No concerns. /// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: TransferReserveAsset { assets: MultiAssets, dest: MultiLocation, xcm: Xcm<()> }, @@ -481,7 +481,7 @@ pub enum Instruction { /// /// Safety: No concerns. /// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: Transact { origin_kind: OriginKind, require_weight_at_most: Weight, call: DoubleEncoded }, @@ -549,14 +549,14 @@ pub enum Instruction { /// /// Safety: No concerns. /// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: ClearOrigin, /// Mutate the origin to some interior location. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: DescendOrigin(InteriorMultiLocation), @@ -567,7 +567,7 @@ pub enum Instruction { /// /// - `response_info`: Information for making the response. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: ReportError(QueryResponseInfo), @@ -578,7 +578,7 @@ pub enum Instruction { /// - `assets`: The asset(s) to remove from holding. /// - `beneficiary`: The new owner for the assets. 
/// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: DepositAsset { assets: MultiAssetFilter, beneficiary: MultiLocation }, @@ -596,7 +596,7 @@ pub enum Instruction { /// - `xcm`: The orders that should follow the `ReserveAssetDeposited` instruction which is /// sent onwards to `dest`. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: DepositReserveAsset { assets: MultiAssetFilter, dest: MultiLocation, xcm: Xcm<()> }, @@ -613,7 +613,7 @@ pub enum Instruction { /// and receive accordingly more. If `false`, then prefer to give as little as possible in /// order to receive as little as possible while receiving at least `want`. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: ExchangeAsset { give: MultiAssetFilter, want: MultiAssets, maximal: bool }, @@ -629,7 +629,7 @@ pub enum Instruction { /// - `xcm`: The instructions to execute on the assets once withdrawn *on the reserve /// location*. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: InitiateReserveWithdraw { assets: MultiAssetFilter, reserve: MultiLocation, xcm: Xcm<()> }, @@ -645,7 +645,7 @@ pub enum Instruction { /// NOTE: The `dest` location *MUST* respect this origin as a valid teleportation origin for /// all `assets`. If it does not, then the assets may be lost. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: InitiateTeleport { assets: MultiAssetFilter, dest: MultiLocation, xcm: Xcm<()> }, @@ -659,7 +659,7 @@ pub enum Instruction { /// will be, asset-wise, *the lesser of this value and the holding register*. No wildcards /// will be used when reporting assets back. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: ReportHolding { response_info: QueryResponseInfo, assets: MultiAssetFilter }, @@ -672,14 +672,14 @@ pub enum Instruction { /// expected maximum weight of the total XCM to be executed for the /// `AllowTopLevelPaidExecutionFrom` barrier to allow the XCM be executed. 
/// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: BuyExecution { fees: MultiAsset, weight_limit: WeightLimit }, /// Refund any surplus weight previously bought with `BuyExecution`. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: None. RefundSurplus, @@ -695,7 +695,7 @@ pub enum Instruction { /// weight however includes only the difference between the previous handler and the new /// handler, which can reasonably be negative, which would result in a surplus. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: None. SetErrorHandler(Xcm), @@ -711,14 +711,14 @@ pub enum Instruction { /// weight however includes only the difference between the previous appendix and the new /// appendix, which can reasonably be negative, which would result in a surplus. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: None. SetAppendix(Xcm), /// Clear the Error Register. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: None. ClearError, @@ -730,14 +730,14 @@ pub enum Instruction { /// - `ticket`: The ticket of the asset; this is an abstract identifier to help locate the /// asset. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: ClaimAsset { assets: MultiAssets, ticket: MultiLocation }, /// Always throws an error of type `Trap`. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: /// - `Trap`: All circumstances, whose inner value is the same as this item's inner value. @@ -752,7 +752,7 @@ pub enum Instruction { /// is sent as a reply may take to execute. NOTE: If this is unexpectedly large then the /// response may not execute at all. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: *Fallible* SubscribeVersion { @@ -763,7 +763,7 @@ pub enum Instruction { /// Cancel the effect of a previous `SubscribeVersion` instruction. 
/// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: *Fallible* UnsubscribeVersion, @@ -774,14 +774,14 @@ pub enum Instruction { /// error if the Holding does not contain the assets (to make this an error, use `ExpectAsset` /// prior). /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: *Infallible* BurnAsset(MultiAssets), /// Throw an error if Holding does not contain at least the given assets. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: /// - `ExpectationFalse`: If Holding Register does not contain the assets in the parameter. @@ -789,7 +789,7 @@ pub enum Instruction { /// Ensure that the Origin Register equals some given value and throw an error if not. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: /// - `ExpectationFalse`: If Origin Register is not equal to the parameter. @@ -797,7 +797,7 @@ pub enum Instruction { /// Ensure that the Error Register equals some given value and throw an error if not. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: /// - `ExpectationFalse`: If the value of the Error Register is not equal to the parameter. @@ -806,7 +806,7 @@ pub enum Instruction { /// Ensure that the Transact Status Register equals some given value and throw an error if /// not. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: /// - `ExpectationFalse`: If the value of the Transact Status Register is not equal to the @@ -824,7 +824,7 @@ pub enum Instruction { /// /// Safety: No concerns. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: *Fallible*. QueryPallet { module_name: Vec, response_info: QueryResponseInfo }, @@ -843,7 +843,7 @@ pub enum Instruction { /// /// Safety: No concerns. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: /// - `ExpectationFalse`: In case any of the expectations are broken. @@ -866,7 +866,7 @@ pub enum Instruction { /// /// Safety: No concerns. 
/// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: *Fallible*. ReportTransactStatus(QueryResponseInfo), @@ -875,7 +875,7 @@ pub enum Instruction { /// /// Safety: No concerns. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: *Infallible*. ClearTransactStatus, @@ -890,7 +890,7 @@ pub enum Instruction { /// The `Junction` parameter should generally be a `GlobalConsensus` variant since it is only /// these which are children of the Universal Ancestor. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: *Fallible*. UniversalOrigin(Junction), @@ -911,7 +911,7 @@ pub enum Instruction { /// `destination: X1(Parachain(1000))`. Alternatively, to export a message for execution on /// Polkadot, you would call with `network: NetworkId:: Polkadot` and `destination: Here`. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: *Fallible*. ExportMessage { network: NetworkId, destination: InteriorMultiLocation, xcm: Xcm<()> }, @@ -927,7 +927,7 @@ pub enum Instruction { /// - `unlocker`: The value which the Origin must be for a corresponding `UnlockAsset` /// instruction to work. /// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: LockAsset { asset: MultiAsset, unlocker: MultiLocation }, @@ -940,7 +940,7 @@ pub enum Instruction { /// /// Safety: No concerns. /// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: UnlockAsset { asset: MultiAsset, target: MultiLocation }, @@ -969,7 +969,7 @@ pub enum Instruction { /// - `locker`: The location from which a previous `NoteUnlockable` was sent and to which an /// `UnlockAsset` should be sent. /// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: RequestUnlock { asset: MultiAsset, locker: MultiLocation }, @@ -979,7 +979,7 @@ pub enum Instruction { /// - `jit_withdraw`: The fees mode item; if set to `true` then fees for any instructions are /// withdrawn as needed using the same mechanism as `WithdrawAssets`. 
/// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: SetFeesMode { jit_withdraw: bool }, @@ -992,21 +992,21 @@ pub enum Instruction { /// /// Safety: No concerns. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: SetTopic([u8; 32]), /// Clear the Topic Register. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: None. ClearTopic, /// Alter the current Origin to another given origin. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: If the existing state would not allow such a change. AliasOrigin(MultiLocation), From 4f47d3ca65f448cfb5da273cce6551e155e7d6f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 15 Aug 2023 10:51:27 +0200 Subject: [PATCH 35/35] Remove superfluous parameter `overseer_enable_anyways` and make parachain node type more explicit (#7617) * Remove superfluous parameter `overseer_enable_anyways` We don't need this flag, as we don't need the overseer enabled when the node isn't a collator or validator. * Rename `IsCollator` to `IsParachainNode` `IsParachainNode` is more expressive and also encapsulates the state of the parachain node being a full node. Some functionality like the overseer needs to run always when the node runs alongside a parachain node. The parachain node needs the overseer to e.g. recover PoVs. Other things like candidate validation or pvf checking are only required for when the node is running as validator.
* FMT * Fix CI --- cli/src/command.rs | 3 +- node/core/approval-voting/src/lib.rs | 2 +- node/core/pvf/execute-worker/src/lib.rs | 4 +- node/network/approval-distribution/src/lib.rs | 14 +- .../approval-distribution/src/tests.rs | 4 +- node/network/collator-protocol/src/lib.rs | 28 ++-- node/network/gossip-support/src/lib.rs | 3 +- .../statement-distribution/src/responder.rs | 4 +- node/service/src/lib.rs | 146 ++++++++++-------- node/service/src/overseer.rs | 13 +- node/test/service/src/lib.rs | 11 +- .../adder/collator/src/main.rs | 9 +- .../undying/collator/src/main.rs | 9 +- runtime/parachains/src/configuration.rs | 19 ++- runtime/parachains/src/paras_inherent/mod.rs | 4 +- .../parachains/src/runtime_api_impl/mod.rs | 5 +- 16 files changed, 148 insertions(+), 130 deletions(-) diff --git a/cli/src/command.rs b/cli/src/command.rs index c75f96ee2ebf..dcffa09aaf91 100644 --- a/cli/src/command.rs +++ b/cli/src/command.rs @@ -287,14 +287,13 @@ where let task_manager = service::build_full( config, service::NewFullParams { - is_collator: service::IsCollator::No, + is_parachain_node: service::IsParachainNode::No, grandpa_pause, jaeger_agent, telemetry_worker_handle: None, node_version, workers_path: cli.run.workers_path, workers_names: None, - overseer_enable_anyways: false, overseer_gen, overseer_message_channel_capacity_override: cli .run diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index 7e29e64c400a..b29e47b4c435 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -2253,7 +2253,7 @@ where // // 1. This is not a local approval, as we don't store anything new in the approval entry. // 2. The candidate is not newly approved, as we haven't altered the approval entry's - // approved flag with `mark_approved` above. + // approved flag with `mark_approved` above. // 3. The approver, if any, had already approved the candidate, as we haven't altered the // bitfield. 
if transition.is_local_approval() || newly_approved || !already_approved_by.unwrap_or(true) diff --git a/node/core/pvf/execute-worker/src/lib.rs b/node/core/pvf/execute-worker/src/lib.rs index 6f632a0ae95e..7a14de18a82f 100644 --- a/node/core/pvf/execute-worker/src/lib.rs +++ b/node/core/pvf/execute-worker/src/lib.rs @@ -55,8 +55,8 @@ use tokio::{io, net::UnixStream}; // // There are quirks to that configuration knob: // -// 1. It only limits the amount of stack space consumed by wasm but does not ensure nor check -// that the stack space is actually available. +// 1. It only limits the amount of stack space consumed by wasm but does not ensure nor check that +// the stack space is actually available. // // That means, if the calling thread has 1 MiB of stack space left and the wasm code consumes // more, then the wasmtime limit will **not** trigger. Instead, the wasm code will hit the diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index 803a56251495..b94ebb282219 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -1319,13 +1319,13 @@ impl State { } // Here we're leaning on a few behaviors of assignment propagation: - // 1. At this point, the only peer we're aware of which has the approval - // message is the source peer. - // 2. We have sent the assignment message to every peer in the required routing - // which is aware of this block _unless_ the peer we originally received the - // assignment from was part of the required routing. In that case, we've sent - // the assignment to all aware peers in the required routing _except_ the original - // source of the assignment. Hence the `in_topology_check`. + // 1. At this point, the only peer we're aware of which has the approval message is + // the source peer. + // 2. 
We have sent the assignment message to every peer in the required routing which + // is aware of this block _unless_ the peer we originally received the assignment + // from was part of the required routing. In that case, we've sent the assignment + // to all aware peers in the required routing _except_ the original source of the + // assignment. Hence the `in_topology_check`. // 3. Any randomly selected peers have been sent the assignment already. let in_topology = topology .map_or(false, |t| t.local_grid_neighbors().route_to_peer(required_routing, peer)); diff --git a/node/network/approval-distribution/src/tests.rs b/node/network/approval-distribution/src/tests.rs index 979f0ada4ee6..422157a1eda9 100644 --- a/node/network/approval-distribution/src/tests.rs +++ b/node/network/approval-distribution/src/tests.rs @@ -463,8 +463,8 @@ fn delay_reputation_change() { /// /// /// 1. Send a view update that removes block B from their view. -/// 2. Send a message from B that they incur `COST_UNEXPECTED_MESSAGE` for, -/// but then they receive `BENEFIT_VALID_MESSAGE`. +/// 2. Send a message from B that they incur `COST_UNEXPECTED_MESSAGE` for, but then they receive +/// `BENEFIT_VALID_MESSAGE`. /// 3. Send all other messages related to B. #[test] fn spam_attack_results_in_negative_reputation_change() { diff --git a/node/network/collator-protocol/src/lib.rs b/node/network/collator-protocol/src/lib.rs index 8e710a26ad71..68d882be6fa1 100644 --- a/node/network/collator-protocol/src/lib.rs +++ b/node/network/collator-protocol/src/lib.rs @@ -37,7 +37,7 @@ use polkadot_node_network_protocol::{ }; use polkadot_primitives::CollatorPair; -use polkadot_node_subsystem::{errors::SubsystemError, overseer, SpawnedSubsystem}; +use polkadot_node_subsystem::{errors::SubsystemError, overseer, DummySubsystem, SpawnedSubsystem}; mod error; @@ -82,6 +82,8 @@ pub enum ProtocolSide { IncomingRequestReceiver, collator_side::Metrics, ), + /// No protocol side, just disable it. 
+ None, } /// The collator protocol subsystem. @@ -98,24 +100,22 @@ impl CollatorProtocolSubsystem { pub fn new(protocol_side: ProtocolSide) -> Self { Self { protocol_side } } - - async fn run(self, ctx: Context) -> std::result::Result<(), error::FatalError> { - match self.protocol_side { - ProtocolSide::Validator { keystore, eviction_policy, metrics } => - validator_side::run(ctx, keystore, eviction_policy, metrics).await, - ProtocolSide::Collator(local_peer_id, collator_pair, req_receiver, metrics) => - collator_side::run(ctx, local_peer_id, collator_pair, req_receiver, metrics).await, - } - } } #[overseer::subsystem(CollatorProtocol, error=SubsystemError, prefix=self::overseer)] impl CollatorProtocolSubsystem { fn start(self, ctx: Context) -> SpawnedSubsystem { - let future = self - .run(ctx) - .map_err(|e| SubsystemError::with_origin("collator-protocol", e)) - .boxed(); + let future = match self.protocol_side { + ProtocolSide::Validator { keystore, eviction_policy, metrics } => + validator_side::run(ctx, keystore, eviction_policy, metrics) + .map_err(|e| SubsystemError::with_origin("collator-protocol", e)) + .boxed(), + ProtocolSide::Collator(local_peer_id, collator_pair, req_receiver, metrics) => + collator_side::run(ctx, local_peer_id, collator_pair, req_receiver, metrics) + .map_err(|e| SubsystemError::with_origin("collator-protocol", e)) + .boxed(), + ProtocolSide::None => return DummySubsystem.start(ctx), + }; SpawnedSubsystem { name: "collator-protocol-subsystem", future } } diff --git a/node/network/gossip-support/src/lib.rs b/node/network/gossip-support/src/lib.rs index 3c178ad9dfa5..b92aa4e9fe39 100644 --- a/node/network/gossip-support/src/lib.rs +++ b/node/network/gossip-support/src/lib.rs @@ -183,8 +183,7 @@ where } /// 1. Determine if the current session index has changed. - /// 2. If it has, determine relevant validators - /// and issue a connection request. + /// 2. If it has, determine relevant validators and issue a connection request. 
async fn handle_active_leaves( &mut self, sender: &mut impl overseer::GossipSupportSenderTrait, diff --git a/node/network/statement-distribution/src/responder.rs b/node/network/statement-distribution/src/responder.rs index 4dad10eb5e4f..68976436039d 100644 --- a/node/network/statement-distribution/src/responder.rs +++ b/node/network/statement-distribution/src/responder.rs @@ -62,8 +62,8 @@ pub async fn respond( // // 1. We want some requesters to have full data fast, rather then lots of them having them // late, as each requester having the data will help distributing it. - // 2. If we take too long, the requests timing out will not yet have had any data sent, - // thus we wasted no bandwidth. + // 2. If we take too long, the requests timing out will not yet have had any data sent, thus + // we wasted no bandwidth. // 3. If the queue is full, requestes will get an immediate error instead of running in a // timeout, thus requesters can immediately try another peer and be faster. // diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index d42c737330cd..dab69473c6ba 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -627,7 +627,7 @@ where #[cfg(feature = "full-node")] pub struct NewFullParams { - pub is_collator: IsCollator, + pub is_parachain_node: IsParachainNode, pub grandpa_pause: Option<(u32, u32)>, pub jaeger_agent: Option, pub telemetry_worker_handle: Option, @@ -638,7 +638,6 @@ pub struct NewFullParams { pub workers_path: Option, /// Optional custom names for the prepare and execute workers. pub workers_names: Option<(String, String)>, - pub overseer_enable_anyways: bool, pub overseer_gen: OverseerGenerator, pub overseer_message_channel_capacity_override: Option, #[allow(dead_code)] @@ -657,32 +656,46 @@ pub struct NewFull { pub backend: Arc, } -/// Is this node a collator? +/// Is this node running as in-process node for a parachain node? 
#[cfg(feature = "full-node")] #[derive(Clone)] -pub enum IsCollator { - /// This node is a collator. - Yes(CollatorPair), - /// This node is not a collator. +pub enum IsParachainNode { + /// This node is running as in-process node for a parachain collator. + Collator(CollatorPair), + /// This node is running as in-process node for a parachain full node. + FullNode, + /// This node is not running as in-process node for a parachain node, aka a normal relay chain + /// node. No, } #[cfg(feature = "full-node")] -impl std::fmt::Debug for IsCollator { +impl std::fmt::Debug for IsParachainNode { fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { use sp_core::Pair; match self { - IsCollator::Yes(pair) => write!(fmt, "Yes({})", pair.public()), - IsCollator::No => write!(fmt, "No"), + IsParachainNode::Collator(pair) => write!(fmt, "Collator({})", pair.public()), + IsParachainNode::FullNode => write!(fmt, "FullNode"), + IsParachainNode::No => write!(fmt, "No"), } } } #[cfg(feature = "full-node")] -impl IsCollator { - /// Is this a collator? +impl IsParachainNode { + /// Is this running alongside a collator? fn is_collator(&self) -> bool { - matches!(self, Self::Yes(_)) + matches!(self, Self::Collator(_)) + } + + /// Is this running alongside a full node? + fn is_full_node(&self) -> bool { + matches!(self, Self::FullNode) + } + + /// Is this node running alongside a parachain node? + fn is_running_alongside_parachain_node(&self) -> bool { + self.is_collator() || self.is_full_node() } } @@ -696,11 +709,6 @@ pub const AVAILABILITY_CONFIG: AvailabilityConfig = AvailabilityConfig { /// This is an advanced feature and not recommended for general use. Generally, `build_full` is /// a better choice. /// -/// `overseer_enable_anyways` always enables the overseer, based on the provided -/// `OverseerGenerator`, regardless of the role the node has. The relay chain selection (longest or -/// disputes-aware) is still determined based on the role of the node. 
Likewise for authority -/// discovery. -/// /// `workers_path` is used to get the path to the directory where auxiliary worker binaries reside. /// If not specified, the main binary's directory is searched first, then `/usr/lib/polkadot` is /// searched. If the path points to an executable rather then directory, that executable is used @@ -709,14 +717,13 @@ pub const AVAILABILITY_CONFIG: AvailabilityConfig = AvailabilityConfig { pub fn new_full( mut config: Configuration, NewFullParams { - is_collator, + is_parachain_node, grandpa_pause, jaeger_agent, telemetry_worker_handle, node_version, workers_path, workers_names, - overseer_enable_anyways, overseer_gen, overseer_message_channel_capacity_override, malus_finality_delay: _malus_finality_delay, @@ -768,8 +775,9 @@ pub fn new_full( let chain_spec = config.chain_spec.cloned_box(); let keystore = basics.keystore_container.local_keystore(); - let auth_or_collator = role.is_authority() || is_collator.is_collator(); - let pvf_checker_enabled = role.is_authority() && !is_collator.is_collator(); + let auth_or_collator = role.is_authority() || is_parachain_node.is_collator(); + // We only need to enable the pvf checker when this is a validator. + let pvf_checker_enabled = role.is_authority(); let select_chain = if auth_or_collator { let metrics = @@ -832,7 +840,12 @@ pub fn new_full( let peerset_protocol_names = PeerSetProtocolNames::new(genesis_hash, config.chain_spec.fork_id()); - if auth_or_collator || overseer_enable_anyways { + // If this is a validator or running alongside a parachain node, we need to enable the + // networking protocols. + // + // Collators and parachain full nodes require the collator and validator networking to send + // collations and to be able to recover PoVs. 
+ if role.is_authority() || is_parachain_node.is_running_alongside_parachain_node() { use polkadot_network_bridge::{peer_sets_info, IsAuthority}; let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No }; for config in peer_sets_info(is_authority, &peerset_protocol_names) { @@ -910,7 +923,7 @@ pub fn new_full( slot_duration_millis: slot_duration.as_millis() as u64, }; - let candidate_validation_config = if role.is_authority() && !is_collator.is_collator() { + let candidate_validation_config = if role.is_authority() { let (prep_worker_path, exec_worker_path) = workers::determine_workers_paths(workers_path, workers_names, node_version.clone())?; log::info!("🚀 Using prepare-worker binary at: {:?}", prep_worker_path); @@ -979,46 +992,50 @@ let overseer_client = client.clone(); let spawner = task_manager.spawn_handle(); - let authority_discovery_service = if auth_or_collator || overseer_enable_anyways { - use futures::StreamExt; - use sc_network::{Event, NetworkEventStream}; + let authority_discovery_service = + // We need the authority discovery if this node is either a validator or running alongside a parachain node. + // Parachain nodes require the authority discovery for finding relay chain validators for sending + // their PoVs or recovering PoVs. + if role.is_authority() || is_parachain_node.is_running_alongside_parachain_node() { + use futures::StreamExt; + use sc_network::{Event, NetworkEventStream}; - let authority_discovery_role = if role.is_authority() { - sc_authority_discovery::Role::PublishAndDiscover(keystore_container.keystore()) + let authority_discovery_role = if role.is_authority() { + sc_authority_discovery::Role::PublishAndDiscover(keystore_container.keystore()) + } else { + // don't publish our addresses when we're not an authority (collator, cumulus, ..) 
+ sc_authority_discovery::Role::Discover + }; + let dht_event_stream = + network.event_stream("authority-discovery").filter_map(|e| async move { + match e { + Event::Dht(e) => Some(e), + _ => None, + } + }); + let (worker, service) = sc_authority_discovery::new_worker_and_service_with_config( + sc_authority_discovery::WorkerConfig { + publish_non_global_ips: auth_disc_publish_non_global_ips, + // Require that authority discovery records are signed. + strict_record_validation: true, + ..Default::default() + }, + client.clone(), + network.clone(), + Box::pin(dht_event_stream), + authority_discovery_role, + prometheus_registry.clone(), + ); + + task_manager.spawn_handle().spawn( + "authority-discovery-worker", + Some("authority-discovery"), + Box::pin(worker.run()), + ); + Some(service) } else { - // don't publish our addresses when we're not an authority (collator, cumulus, ..) - sc_authority_discovery::Role::Discover + None }; - let dht_event_stream = - network.event_stream("authority-discovery").filter_map(|e| async move { - match e { - Event::Dht(e) => Some(e), - _ => None, - } - }); - let (worker, service) = sc_authority_discovery::new_worker_and_service_with_config( - sc_authority_discovery::WorkerConfig { - publish_non_global_ips: auth_disc_publish_non_global_ips, - // Require that authority discovery records are signed. 
- strict_record_validation: true, - ..Default::default() - }, - client.clone(), - network.clone(), - Box::pin(dht_event_stream), - authority_discovery_role, - prometheus_registry.clone(), - ); - - task_manager.spawn_handle().spawn( - "authority-discovery-worker", - Some("authority-discovery"), - Box::pin(worker.run()), - ); - Some(service) - } else { - None - }; let overseer_handle = if let Some(authority_discovery_service) = authority_discovery_service { let (overseer, overseer_handle) = overseer_gen @@ -1039,7 +1056,7 @@ pub fn new_full( dispute_req_receiver, registry: prometheus_registry.as_ref(), spawner, - is_collator, + is_parachain_node, approval_voting_config, availability_config: AVAILABILITY_CONFIG, candidate_validation_config, @@ -1332,11 +1349,6 @@ pub fn new_chain_ops( /// /// The actual "flavor", aka if it will use `Polkadot`, `Rococo` or `Kusama` is determined based on /// [`IdentifyVariant`] using the chain spec. -/// -/// `overseer_enable_anyways` always enables the overseer, based on the provided -/// `OverseerGenerator`, regardless of the role the node has. The relay chain selection (longest or -/// disputes-aware) is still determined based on the role of the node. Likewise for authority -/// discovery. #[cfg(feature = "full-node")] pub fn build_full( config: Configuration, diff --git a/node/service/src/overseer.rs b/node/service/src/overseer.rs index 29122ddca162..b315d2847c07 100644 --- a/node/service/src/overseer.rs +++ b/node/service/src/overseer.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
-use super::{AuthorityDiscoveryApi, Block, Error, Hash, IsCollator, Registry}; +use super::{AuthorityDiscoveryApi, Block, Error, Hash, IsParachainNode, Registry}; use polkadot_node_subsystem_types::DefaultSubsystemClient; use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_core::traits::SpawnNamed; @@ -108,7 +108,7 @@ where /// Task spawner to be used throughout the overseer and the APIs it provides. pub spawner: Spawner, /// Determines the behavior of the collator. - pub is_collator: IsCollator, + pub is_parachain_node: IsParachainNode, /// Configuration for the approval voting subsystem. pub approval_voting_config: ApprovalVotingConfig, /// Configuration for the availability store subsystem. @@ -149,7 +149,7 @@ pub fn prepared_overseer_builder( dispute_req_receiver, registry, spawner, - is_collator, + is_parachain_node, approval_voting_config, availability_config, candidate_validation_config, @@ -266,14 +266,15 @@ where .chain_api(ChainApiSubsystem::new(runtime_client.clone(), Metrics::register(registry)?)) .collation_generation(CollationGenerationSubsystem::new(Metrics::register(registry)?)) .collator_protocol({ - let side = match is_collator { - IsCollator::Yes(collator_pair) => ProtocolSide::Collator( + let side = match is_parachain_node { + IsParachainNode::Collator(collator_pair) => ProtocolSide::Collator( network_service.local_peer_id(), collator_pair, collation_req_receiver, Metrics::register(registry)?, ), - IsCollator::No => ProtocolSide::Validator { + IsParachainNode::FullNode => ProtocolSide::None, + IsParachainNode::No => ProtocolSide::Validator { keystore: keystore.clone(), eviction_policy: Default::default(), metrics: Metrics::register(registry)?, diff --git a/node/test/service/src/lib.rs b/node/test/service/src/lib.rs index ed25d28d2925..932e95a7cab6 100644 --- a/node/test/service/src/lib.rs +++ b/node/test/service/src/lib.rs @@ -28,7 +28,7 @@ use polkadot_overseer::Handle; use polkadot_primitives::{Balance, CollatorPair, HeadData, 
Id as ParaId, ValidationCode}; use polkadot_runtime_common::BlockHashCount; use polkadot_runtime_parachains::paras::{ParaGenesisArgs, ParaKind}; -use polkadot_service::{Error, FullClient, IsCollator, NewFull, PrometheusConfig}; +use polkadot_service::{Error, FullClient, IsParachainNode, NewFull, PrometheusConfig}; use polkadot_test_runtime::{ ParasCall, ParasSudoWrapperCall, Runtime, SignedExtra, SignedPayload, SudoCall, UncheckedExtrinsic, VERSION, @@ -71,7 +71,7 @@ pub use polkadot_service::{FullBackend, GetLastTimestamp}; #[sc_tracing::logging::prefix_logs_with(config.network.node_name.as_str())] pub fn new_full( config: Configuration, - is_collator: IsCollator, + is_parachain_node: IsParachainNode, workers_path: Option, ) -> Result { let workers_path = Some(workers_path.unwrap_or_else(get_relative_workers_path_for_test)); @@ -79,14 +79,13 @@ pub fn new_full( polkadot_service::new_full( config, polkadot_service::NewFullParams { - is_collator, + is_parachain_node, grandpa_pause: None, jaeger_agent: None, telemetry_worker_handle: None, node_version: None, workers_path, workers_names: None, - overseer_enable_anyways: false, overseer_gen: polkadot_service::RealOverseerGen, overseer_message_channel_capacity_override: None, malus_finality_delay: None, @@ -207,7 +206,7 @@ pub fn run_validator_node( ) -> PolkadotTestNode { let multiaddr = config.network.listen_addresses[0].clone(); let NewFull { task_manager, client, network, rpc_handlers, overseer_handle, .. 
} = - new_full(config, IsCollator::No, worker_program_path) + new_full(config, IsParachainNode::No, worker_program_path) .expect("could not create Polkadot test service"); let overseer_handle = overseer_handle.expect("test node must have an overseer handle"); @@ -239,7 +238,7 @@ pub fn run_collator_node( let config = node_config(storage_update_func, tokio_handle, key, boot_nodes, false); let multiaddr = config.network.listen_addresses[0].clone(); let NewFull { task_manager, client, network, rpc_handlers, overseer_handle, .. } = - new_full(config, IsCollator::Yes(collator_pair), None) + new_full(config, IsParachainNode::Collator(collator_pair), None) .expect("could not create Polkadot test service"); let overseer_handle = overseer_handle.expect("test node must have an overseer handle"); diff --git a/parachain/test-parachains/adder/collator/src/main.rs b/parachain/test-parachains/adder/collator/src/main.rs index 8d8a13767178..f9efa9c68ad3 100644 --- a/parachain/test-parachains/adder/collator/src/main.rs +++ b/parachain/test-parachains/adder/collator/src/main.rs @@ -21,6 +21,7 @@ use polkadot_node_primitives::CollationGenerationConfig; use polkadot_node_subsystem::messages::{CollationGenerationMessage, CollatorProtocolMessage}; use polkadot_primitives::Id as ParaId; use sc_cli::{Error as SubstrateCliError, SubstrateCli}; +use sc_service::Role; use sp_core::hexdisplay::HexDisplay; use test_parachain_adder_collator::Collator; @@ -57,10 +58,15 @@ fn main() -> Result<()> { let collator = Collator::new(); config.disable_beefy = true; + // Zombienet is spawning all collators currently with the same CLI, this means it + // sets `--validator` and this is wrong here. 
+ config.role = Role::Full; let full_node = polkadot_service::build_full( config, polkadot_service::NewFullParams { - is_collator: polkadot_service::IsCollator::Yes(collator.collator_key()), + is_parachain_node: polkadot_service::IsParachainNode::Collator( + collator.collator_key(), + ), grandpa_pause: None, jaeger_agent: None, telemetry_worker_handle: None, @@ -70,7 +76,6 @@ fn main() -> Result<()> { workers_path: None, workers_names: None, - overseer_enable_anyways: false, overseer_gen: polkadot_service::RealOverseerGen, overseer_message_channel_capacity_override: None, malus_finality_delay: None, diff --git a/parachain/test-parachains/undying/collator/src/main.rs b/parachain/test-parachains/undying/collator/src/main.rs index da8205ba1893..8eadc233ae78 100644 --- a/parachain/test-parachains/undying/collator/src/main.rs +++ b/parachain/test-parachains/undying/collator/src/main.rs @@ -21,6 +21,7 @@ use polkadot_node_primitives::CollationGenerationConfig; use polkadot_node_subsystem::messages::{CollationGenerationMessage, CollatorProtocolMessage}; use polkadot_primitives::Id as ParaId; use sc_cli::{Error as SubstrateCliError, SubstrateCli}; +use sc_service::Role; use sp_core::hexdisplay::HexDisplay; use test_parachain_undying_collator::Collator; @@ -57,10 +58,15 @@ fn main() -> Result<()> { let collator = Collator::new(cli.run.pov_size, cli.run.pvf_complexity); config.disable_beefy = true; + // Zombienet is spawning all collators currently with the same CLI, this means it + // sets `--validator` and this is wrong here. 
+ config.role = Role::Full; let full_node = polkadot_service::build_full( config, polkadot_service::NewFullParams { - is_collator: polkadot_service::IsCollator::Yes(collator.collator_key()), + is_parachain_node: polkadot_service::IsParachainNode::Collator( + collator.collator_key(), + ), grandpa_pause: None, jaeger_agent: None, telemetry_worker_handle: None, @@ -70,7 +76,6 @@ fn main() -> Result<()> { workers_path: None, workers_names: None, - overseer_enable_anyways: false, overseer_gen: polkadot_service::RealOverseerGen, overseer_message_channel_capacity_override: None, malus_finality_delay: None, diff --git a/runtime/parachains/src/configuration.rs b/runtime/parachains/src/configuration.rs index d4ad8619f16e..0631b280aadd 100644 --- a/runtime/parachains/src/configuration.rs +++ b/runtime/parachains/src/configuration.rs @@ -1244,28 +1244,27 @@ impl Pallet { ) -> DispatchResult { let mut pending_configs = >::get(); - // 1. pending_configs = [] - // No pending configuration changes. + // 1. pending_configs = [] No pending configuration changes. // // That means we should use the active config as the base configuration. We will insert // the new pending configuration as (cur+2, new_config) into the list. // - // 2. pending_configs = [(cur+2, X)] - // There is a configuration that is pending for the scheduled session. + // 2. pending_configs = [(cur+2, X)] There is a configuration that is pending for the + // scheduled session. // // We will use X as the base configuration. We can update the pending configuration X // directly. // - // 3. pending_configs = [(cur+1, X)] - // There is a pending configuration scheduled and it will be applied in the next session. + // 3. pending_configs = [(cur+1, X)] There is a pending configuration scheduled and it will + // be applied in the next session. // // We will use X as the base configuration. We need to schedule a new configuration // change for the `scheduled_session` and use X as the base for the new configuration. 
// - // 4. pending_configs = [(cur+1, X), (cur+2, Y)] - // There is a pending configuration change in the next session and for the scheduled - // session. Due to case â„–3, we can be sure that Y is based on top of X. This means we - // can use Y as the base configuration and update Y directly. + // 4. pending_configs = [(cur+1, X), (cur+2, Y)] There is a pending configuration change in + // the next session and for the scheduled session. Due to case â„–3, we can be sure that Y + // is based on top of X. This means we can use Y as the base configuration and update Y + // directly. // // There cannot be (cur, X) because those are applied in the session change handler for the // current session. diff --git a/runtime/parachains/src/paras_inherent/mod.rs b/runtime/parachains/src/paras_inherent/mod.rs index a40a3422a669..da0b972bc92c 100644 --- a/runtime/parachains/src/paras_inherent/mod.rs +++ b/runtime/parachains/src/paras_inherent/mod.rs @@ -977,8 +977,8 @@ fn compute_entropy(parent_hash: T::Hash) -> [u8; 32] { /// 2. If exceeded: /// 1. Check validity of all dispute statements sequentially /// 2. If not exceeded: -/// 1. If weight is exceeded by locals, pick the older ones (lower indices) -/// until the weight limit is reached. +/// 1. If weight is exceeded by locals, pick the older ones (lower indices) until the weight limit +/// is reached. /// /// Returns the consumed weight amount, that is guaranteed to be less than the provided /// `max_consumable_weight`. diff --git a/runtime/parachains/src/runtime_api_impl/mod.rs b/runtime/parachains/src/runtime_api_impl/mod.rs index e22ef825858d..e066ad825a33 100644 --- a/runtime/parachains/src/runtime_api_impl/mod.rs +++ b/runtime/parachains/src/runtime_api_impl/mod.rs @@ -23,8 +23,7 @@ //! will contain methods from `vstaging`. //! The promotion consists of the following steps: //! 1. Bump the version of the stable module (e.g. `v2` becomes `v3`) -//! 2. Move methods from `vstaging` to `v3`. 
The new stable version should include -//! all methods from `vstaging` tagged with the new version number (e.g. all -//! `v3` methods). +//! 2. Move methods from `vstaging` to `v3`. The new stable version should include all methods from +//! `vstaging` tagged with the new version number (e.g. all `v3` methods). pub mod v5; pub mod vstaging;