diff --git a/polkadot/node/subsystem-bench/src/approval/message_generator.rs b/polkadot/node/subsystem-bench/src/approval/message_generator.rs
index 4318dcdf8902..0d308a2fecc0 100644
--- a/polkadot/node/subsystem-bench/src/approval/message_generator.rs
+++ b/polkadot/node/subsystem-bench/src/approval/message_generator.rs
@@ -62,10 +62,11 @@ use crate::{
GeneratedState, BUFFER_FOR_GENERATION_MILLIS, LOG_TARGET, SLOT_DURATION_MILLIS,
},
core::{
- configuration::{TestAuthorities, TestConfiguration, TestObjective},
+ configuration::{TestAuthorities, TestConfiguration},
mock::session_info_for_peers,
NODE_UNDER_TEST,
},
+ TestObjective,
};
use polkadot_node_network_protocol::v3 as protocol_v3;
use polkadot_primitives::Hash;
diff --git a/polkadot/node/subsystem-bench/src/approval/mod.rs b/polkadot/node/subsystem-bench/src/approval/mod.rs
index 055aeb193456..b1ab53638701 100644
--- a/polkadot/node/subsystem-bench/src/approval/mod.rs
+++ b/polkadot/node/subsystem-bench/src/approval/mod.rs
@@ -28,7 +28,7 @@ use crate::{
mock_chain_selection::MockChainSelection,
},
core::{
- configuration::{TestAuthorities, TestConfiguration},
+ configuration::TestAuthorities,
environment::{
BenchmarkUsage, TestEnvironment, TestEnvironmentDependencies, MAX_TIME_OF_FLIGHT,
},
@@ -43,6 +43,7 @@ use crate::{
},
NODE_UNDER_TEST,
},
+ TestConfiguration,
};
use colored::Colorize;
use futures::channel::oneshot;
diff --git a/polkadot/node/subsystem-bench/src/availability/mod.rs b/polkadot/node/subsystem-bench/src/availability/mod.rs
index 56ec6705b7e3..8ed39525a1e3 100644
--- a/polkadot/node/subsystem-bench/src/availability/mod.rs
+++ b/polkadot/node/subsystem-bench/src/availability/mod.rs
@@ -67,7 +67,7 @@ use super::core::{configuration::TestConfiguration, mock::dummy_builder, network
const LOG_TARGET: &str = "subsystem-bench::availability";
-use super::{cli::TestObjective, core::mock::AlwaysSupportsParachains};
+use super::{core::mock::AlwaysSupportsParachains, TestObjective};
use polkadot_node_subsystem_test_helpers::{
derive_erasure_chunks_with_proofs_and_root, mock::new_block_import_info,
};
diff --git a/polkadot/node/subsystem-bench/src/cli.rs b/polkadot/node/subsystem-bench/src/cli.rs
deleted file mode 100644
index 21f5e6a85629..000000000000
--- a/polkadot/node/subsystem-bench/src/cli.rs
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright (C) Parity Technologies (UK) Ltd.
-// This file is part of Polkadot.
-
-// Polkadot is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Polkadot is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
-use super::availability::DataAvailabilityReadOptions;
-use crate::approval::ApprovalsOptions;
-use serde::{Deserialize, Serialize};
-
-#[derive(Debug, Clone, Serialize, Deserialize, clap::Parser)]
-#[clap(rename_all = "kebab-case")]
-#[allow(missing_docs)]
-pub struct TestSequenceOptions {
- #[clap(short, long, ignore_case = true)]
- pub path: String,
-}
-
-/// Supported test objectives
-#[derive(Debug, Clone, clap::Parser, Serialize, Deserialize)]
-#[command(rename_all = "kebab-case")]
-pub enum TestObjective {
- /// Benchmark availability recovery strategies.
- DataAvailabilityRead(DataAvailabilityReadOptions),
- /// Benchmark availability and bitfield distribution.
- DataAvailabilityWrite,
- /// Run a test sequence specified in a file
- TestSequence(TestSequenceOptions),
- /// Benchmark the approval-voting and approval-distribution subsystems.
- ApprovalVoting(ApprovalsOptions),
- Unimplemented,
-}
-
-impl std::fmt::Display for TestObjective {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- write!(
- f,
- "{}",
- match self {
- Self::DataAvailabilityRead(_) => "DataAvailabilityRead",
- Self::DataAvailabilityWrite => "DataAvailabilityWrite",
- Self::TestSequence(_) => "TestSequence",
- Self::ApprovalVoting(_) => "ApprovalVoting",
- Self::Unimplemented => "Unimplemented",
- }
- )
- }
-}
-
-#[derive(Debug, clap::Parser)]
-#[clap(rename_all = "kebab-case")]
-#[allow(missing_docs)]
-pub struct StandardTestOptions {
- #[clap(long, ignore_case = true, default_value_t = 100)]
- /// Number of cores to fetch availability for.
- pub n_cores: usize,
-
- #[clap(long, ignore_case = true, default_value_t = 500)]
- /// Number of validators to fetch chunks from.
- pub n_validators: usize,
-
- #[clap(long, ignore_case = true, default_value_t = 5120)]
- /// The minimum pov size in KiB
- pub min_pov_size: usize,
-
- #[clap(long, ignore_case = true, default_value_t = 5120)]
- /// The maximum pov size bytes
- pub max_pov_size: usize,
-
- #[clap(short, long, ignore_case = true, default_value_t = 1)]
- /// The number of blocks the test is going to run.
- pub num_blocks: usize,
-}
diff --git a/polkadot/node/subsystem-bench/src/core/configuration.rs b/polkadot/node/subsystem-bench/src/core/configuration.rs
index 0c8a78c504c8..d9eec43873aa 100644
--- a/polkadot/node/subsystem-bench/src/core/configuration.rs
+++ b/polkadot/node/subsystem-bench/src/core/configuration.rs
@@ -22,7 +22,7 @@ use sc_network::PeerId;
use sp_consensus_babe::AuthorityId;
use std::{collections::HashMap, path::Path};
-pub use crate::cli::TestObjective;
+use crate::TestObjective;
use polkadot_primitives::{AssignmentId, AuthorityDiscoveryId, ValidatorId};
use rand::thread_rng;
use rand_distr::{Distribution, Normal, Uniform};
@@ -240,95 +240,6 @@ impl TestConfiguration {
peer_id_to_authority,
}
}
-
- /// An unconstrained standard configuration matching Polkadot/Kusama
- pub fn ideal_network(
- objective: TestObjective,
- num_blocks: usize,
- n_validators: usize,
- n_cores: usize,
- min_pov_size: usize,
- max_pov_size: usize,
- ) -> TestConfiguration {
- Self {
- objective,
- n_cores,
- n_validators,
- max_validators_per_core: 5,
- pov_sizes: generate_pov_sizes(n_cores, min_pov_size, max_pov_size),
- bandwidth: 50 * 1024 * 1024,
- peer_bandwidth: 50 * 1024 * 1024,
- // No latency
- latency: None,
- num_blocks,
- min_pov_size,
- max_pov_size,
- connectivity: 100,
- needed_approvals: default_needed_approvals(),
- n_delay_tranches: default_n_delay_tranches(),
- no_show_slots: default_no_show_slots(),
- relay_vrf_modulo_samples: default_relay_vrf_modulo_samples(),
- zeroth_delay_tranche_width: default_zeroth_delay_tranche_width(),
- }
- }
-
- pub fn healthy_network(
- objective: TestObjective,
- num_blocks: usize,
- n_validators: usize,
- n_cores: usize,
- min_pov_size: usize,
- max_pov_size: usize,
- ) -> TestConfiguration {
- Self {
- objective,
- n_cores,
- n_validators,
- max_validators_per_core: 5,
- pov_sizes: generate_pov_sizes(n_cores, min_pov_size, max_pov_size),
- bandwidth: 50 * 1024 * 1024,
- peer_bandwidth: 50 * 1024 * 1024,
- latency: Some(PeerLatency { mean_latency_ms: 50, std_dev: 12.5 }),
- num_blocks,
- min_pov_size,
- max_pov_size,
- connectivity: 95,
- needed_approvals: default_needed_approvals(),
- n_delay_tranches: default_n_delay_tranches(),
- no_show_slots: default_no_show_slots(),
- relay_vrf_modulo_samples: default_relay_vrf_modulo_samples(),
- zeroth_delay_tranche_width: default_zeroth_delay_tranche_width(),
- }
- }
-
- pub fn degraded_network(
- objective: TestObjective,
- num_blocks: usize,
- n_validators: usize,
- n_cores: usize,
- min_pov_size: usize,
- max_pov_size: usize,
- ) -> TestConfiguration {
- Self {
- objective,
- n_cores,
- n_validators,
- max_validators_per_core: 5,
- pov_sizes: generate_pov_sizes(n_cores, min_pov_size, max_pov_size),
- bandwidth: 50 * 1024 * 1024,
- peer_bandwidth: 50 * 1024 * 1024,
- latency: Some(PeerLatency { mean_latency_ms: 150, std_dev: 40.0 }),
- num_blocks,
- min_pov_size,
- max_pov_size,
- connectivity: 67,
- needed_approvals: default_needed_approvals(),
- n_delay_tranches: default_n_delay_tranches(),
- no_show_slots: default_no_show_slots(),
- relay_vrf_modulo_samples: default_relay_vrf_modulo_samples(),
- zeroth_delay_tranche_width: default_zeroth_delay_tranche_width(),
- }
- }
}
/// Sample latency (in milliseconds) from a normal distribution with parameters
diff --git a/polkadot/node/subsystem-bench/src/subsystem-bench.rs b/polkadot/node/subsystem-bench/src/subsystem-bench.rs
index 433354f6525d..a5dfbc52d606 100644
--- a/polkadot/node/subsystem-bench/src/subsystem-bench.rs
+++ b/polkadot/node/subsystem-bench/src/subsystem-bench.rs
@@ -17,6 +17,7 @@
//! A tool for running subsystem benchmark tests designed for development and
//! CI regression testing.
use clap::Parser;
+use serde::{Deserialize, Serialize};
use colored::Colorize;
@@ -28,24 +29,23 @@ use std::path::Path;
pub(crate) mod approval;
pub(crate) mod availability;
-pub(crate) mod cli;
pub(crate) mod core;
mod valgrind;
const LOG_TARGET: &str = "subsystem-bench";
use availability::{prepare_test, NetworkEmulation, TestState};
-use cli::TestObjective;
+use approval::{bench_approvals, ApprovalsOptions};
+use availability::DataAvailabilityReadOptions;
use core::{
configuration::TestConfiguration,
+ display::display_configuration,
environment::{TestEnvironment, GENESIS_HASH},
};
use clap_num::number_range;
-use crate::{approval::bench_approvals, core::display::display_configuration};
-
fn le_100(s: &str) -> Result<usize, String> {
number_range(s, 0, 100)
}
@@ -54,6 +54,34 @@ fn le_5000(s: &str) -> Result<usize, String> {
number_range(s, 0, 5000)
}
+/// Supported test objectives
+#[derive(Debug, Clone, Parser, Serialize, Deserialize)]
+#[command(rename_all = "kebab-case")]
+pub enum TestObjective {
+ /// Benchmark availability recovery strategies.
+ DataAvailabilityRead(DataAvailabilityReadOptions),
+ /// Benchmark availability and bitfield distribution.
+ DataAvailabilityWrite,
+ /// Benchmark the approval-voting and approval-distribution subsystems.
+ ApprovalVoting(ApprovalsOptions),
+ Unimplemented,
+}
+
+impl std::fmt::Display for TestObjective {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(
+ f,
+ "{}",
+ match self {
+ Self::DataAvailabilityRead(_) => "DataAvailabilityRead",
+ Self::DataAvailabilityWrite => "DataAvailabilityWrite",
+ Self::ApprovalVoting(_) => "ApprovalVoting",
+ Self::Unimplemented => "Unimplemented",
+ }
+ )
+ }
+}
+
#[derive(Debug, Parser)]
#[allow(missing_docs)]
struct BenchCli {
@@ -61,9 +89,6 @@ struct BenchCli {
/// The type of network to be emulated
pub network: NetworkEmulation,
- #[clap(flatten)]
- pub standard_configuration: cli::StandardTestOptions,
-
#[clap(short, long)]
/// The bandwidth of emulated remote peers in KiB
pub peer_bandwidth: Option<usize>,
@@ -104,42 +129,12 @@ struct BenchCli {
/// Shows the output in YAML format
pub yaml_output: bool,
- #[command(subcommand)]
- pub objective: cli::TestObjective,
+ #[arg(required = true)]
+ /// Path to the test sequence configuration file
+ pub path: String,
}
impl BenchCli {
- fn create_test_configuration(&self) -> TestConfiguration {
- let configuration = &self.standard_configuration;
-
- match self.network {
- NetworkEmulation::Healthy => TestConfiguration::healthy_network(
- self.objective.clone(),
- configuration.num_blocks,
- configuration.n_validators,
- configuration.n_cores,
- configuration.min_pov_size,
- configuration.max_pov_size,
- ),
- NetworkEmulation::Degraded => TestConfiguration::degraded_network(
- self.objective.clone(),
- configuration.num_blocks,
- configuration.n_validators,
- configuration.n_cores,
- configuration.min_pov_size,
- configuration.max_pov_size,
- ),
- NetworkEmulation::Ideal => TestConfiguration::ideal_network(
- self.objective.clone(),
- configuration.num_blocks,
- configuration.n_validators,
- configuration.n_cores,
- configuration.min_pov_size,
- configuration.max_pov_size,
- ),
- }
- }
-
fn launch(self) -> eyre::Result<()> {
let is_valgrind_running = valgrind::is_valgrind_running();
if !is_valgrind_running && self.cache_misses {
@@ -156,125 +151,56 @@ impl BenchCli {
None
};
- let mut test_config = match self.objective {
- TestObjective::TestSequence(options) => {
- let test_sequence =
- core::configuration::TestSequence::new_from_file(Path::new(&options.path))
- .expect("File exists")
- .into_vec();
- let num_steps = test_sequence.len();
- gum::info!(
- "{}",
- format!("Sequence contains {} step(s)", num_steps).bright_purple()
- );
- for (index, test_config) in test_sequence.into_iter().enumerate() {
- let benchmark_name =
- format!("{} #{} {}", &options.path, index + 1, test_config.objective);
- gum::info!(target: LOG_TARGET, "{}", format!("Step {}/{}", index + 1, num_steps).bright_purple(),);
- display_configuration(&test_config);
-
- let usage = match test_config.objective {
- TestObjective::DataAvailabilityRead(ref _opts) => {
- let mut state = TestState::new(&test_config);
- let (mut env, _protocol_config) = prepare_test(test_config, &mut state);
- env.runtime().block_on(availability::benchmark_availability_read(
- &benchmark_name,
- &mut env,
- state,
- ))
- },
- TestObjective::ApprovalVoting(ref options) => {
- let (mut env, state) =
- approval::prepare_test(test_config.clone(), options.clone());
- env.runtime().block_on(bench_approvals(
- &benchmark_name,
- &mut env,
- state,
- ))
- },
- TestObjective::DataAvailabilityWrite => {
- let mut state = TestState::new(&test_config);
- let (mut env, _protocol_config) = prepare_test(test_config, &mut state);
- env.runtime().block_on(availability::benchmark_availability_write(
- &benchmark_name,
- &mut env,
- state,
- ))
- },
- TestObjective::TestSequence(_) => todo!(),
- TestObjective::Unimplemented => todo!(),
- };
-
- let output = if self.yaml_output {
- serde_yaml::to_string(&vec![usage])?
- } else {
- usage.to_string()
- };
- println!("{}", output);
- }
-
- return Ok(())
- },
- TestObjective::DataAvailabilityRead(ref _options) => self.create_test_configuration(),
- TestObjective::DataAvailabilityWrite => self.create_test_configuration(),
- TestObjective::ApprovalVoting(_) => todo!(),
- TestObjective::Unimplemented => todo!(),
- };
-
- let mut latency_config = test_config.latency.clone().unwrap_or_default();
-
- if let Some(latency) = self.peer_mean_latency {
- latency_config.mean_latency_ms = latency;
+ let test_sequence = core::configuration::TestSequence::new_from_file(Path::new(&self.path))
+ .expect("File exists")
+ .into_vec();
+ let num_steps = test_sequence.len();
+ gum::info!("{}", format!("Sequence contains {} step(s)", num_steps).bright_purple());
+ for (index, test_config) in test_sequence.into_iter().enumerate() {
+ let benchmark_name = format!("{} #{} {}", &self.path, index + 1, test_config.objective);
+ gum::info!(target: LOG_TARGET, "{}", format!("Step {}/{}", index + 1, num_steps).bright_purple(),);
+ display_configuration(&test_config);
+
+ let usage = match test_config.objective {
+ TestObjective::DataAvailabilityRead(ref _opts) => {
+ let mut state = TestState::new(&test_config);
+ let (mut env, _protocol_config) = prepare_test(test_config, &mut state);
+ env.runtime().block_on(availability::benchmark_availability_read(
+ &benchmark_name,
+ &mut env,
+ state,
+ ))
+ },
+ TestObjective::ApprovalVoting(ref options) => {
+ let (mut env, state) =
+ approval::prepare_test(test_config.clone(), options.clone());
+ env.runtime().block_on(bench_approvals(&benchmark_name, &mut env, state))
+ },
+ TestObjective::DataAvailabilityWrite => {
+ let mut state = TestState::new(&test_config);
+ let (mut env, _protocol_config) = prepare_test(test_config, &mut state);
+ env.runtime().block_on(availability::benchmark_availability_write(
+ &benchmark_name,
+ &mut env,
+ state,
+ ))
+ },
+ TestObjective::Unimplemented => todo!(),
+ };
+
+ let output = if self.yaml_output {
+ serde_yaml::to_string(&vec![usage])?
+ } else {
+ usage.to_string()
+ };
+ println!("{}", output);
}
- if let Some(std_dev) = self.peer_latency_std_dev {
- latency_config.std_dev = std_dev;
- }
-
- // Write back the updated latency.
- test_config.latency = Some(latency_config);
-
- if let Some(connectivity) = self.connectivity {
- test_config.connectivity = connectivity;
- }
-
- if let Some(bandwidth) = self.peer_bandwidth {
- // CLI expects bw in KiB
- test_config.peer_bandwidth = bandwidth * 1024;
- }
-
- if let Some(bandwidth) = self.bandwidth {
- // CLI expects bw in KiB
- test_config.bandwidth = bandwidth * 1024;
- }
-
- display_configuration(&test_config);
-
- let mut state = TestState::new(&test_config);
- let (mut env, _protocol_config) = prepare_test(test_config, &mut state);
-
- let benchmark_name = format!("{}", self.objective);
- let usage = match self.objective {
- TestObjective::DataAvailabilityRead(_options) => env.runtime().block_on(
- availability::benchmark_availability_read(&benchmark_name, &mut env, state),
- ),
- TestObjective::DataAvailabilityWrite => env.runtime().block_on(
- availability::benchmark_availability_write(&benchmark_name, &mut env, state),
- ),
- TestObjective::TestSequence(_options) => todo!(),
- TestObjective::ApprovalVoting(_) => todo!(),
- TestObjective::Unimplemented => todo!(),
- };
-
if let Some(agent_running) = agent_running {
let agent_ready = agent_running.stop()?;
agent_ready.shutdown();
}
- let output =
- if self.yaml_output { serde_yaml::to_string(&vec![usage])? } else { usage.to_string() };
- println!("{}", output);
-
Ok(())
}
}