Upgrade to polkadot-v0.9.43 (#1202)
Co-authored-by: Xavier Lau <xavier@inv.cafe>
Co-authored-by: Guantong <i@gt.email>
Co-authored-by: bear <boundless.forest@outlook.com>
3 people authored Jul 18, 2023
1 parent 23248a5 commit 0ea16bc
Showing 86 changed files with 3,179 additions and 2,049 deletions.
3,186 changes: 1,870 additions & 1,316 deletions Cargo.lock

Large diffs are not rendered by default.

281 changes: 139 additions & 142 deletions Cargo.toml

Large diffs are not rendered by default.

3 changes: 0 additions & 3 deletions node/Cargo.toml
@@ -39,7 +39,6 @@ pangolin-runtime = { workspace = true, optional = true }
pangoro-runtime = { workspace = true, optional = true }

# frontier
fc-cli = { workspace = true }
fc-db = { workspace = true }
fc-mapping-sync = { workspace = true }
fc-rpc = { workspace = true }
@@ -52,9 +51,7 @@ fp-rpc = { workspace = true, features = ["std"] }
moonbeam-primitives-ext = { workspace = true, optional = true, features = ["std"] }
moonbeam-rpc-debug = { workspace = true }
moonbeam-rpc-primitives-debug = { workspace = true, features = ["std"] }
moonbeam-rpc-primitives-txpool = { workspace = true, features = ["std"] }
moonbeam-rpc-trace = { workspace = true }
moonbeam-rpc-txpool = { workspace = true }

# polkadot
polkadot-cli = { workspace = true }
53 changes: 48 additions & 5 deletions node/src/cli.rs
@@ -49,9 +49,6 @@ pub enum Subcommand {
/// Export the genesis wasm of the parachain.
ExportGenesisWasm(cumulus_client_cli::ExportGenesisWasmCommand),

/// Db meta columns information.
FrontierDb(fc_cli::FrontierDbCmd),

/// Sub-commands concerned with benchmarking.
/// The pallet benchmarking moved to the `pallet` sub-command.
#[cfg(feature = "runtime-benchmarks")]
@@ -121,11 +118,26 @@ impl RelayChainCli {
) -> Self {
let extension = crate::chain_spec::Extensions::try_get(&*para_config.chain_spec);
let chain_id = extension.map(|e| e.relay_chain.clone());
let base_path = para_config.base_path.as_ref().map(|x| x.path().join("polkadot"));
Self { base_path, chain_id, base: clap::Parser::parse_from(relay_chain_args) }
let base_path = para_config.base_path.path().join("polkadot");

Self {
base_path: Some(base_path),
chain_id,
base: clap::Parser::parse_from(relay_chain_args),
}
}
}

/// Available frontier backend types.
#[derive(Debug, Copy, Clone, Default, clap::ValueEnum)]
pub enum FrontierBackendType {
/// Either RocksDb or ParityDb, as inherited from the global backend settings.
#[default]
KeyValue,
/// Sql database with custom log indexing.
Sql,
}

#[derive(Debug, clap::Parser)]
pub struct EthArgs {
/// Enable EVM tracing functionalities.
Expand Down Expand Up @@ -167,6 +179,27 @@ pub struct EthArgs {
/// Maximum fee history cache size.
#[arg(long, default_value = "2048")]
pub fee_history_limit: u64,

/// Sets the frontier backend type (KeyValue or Sql)
#[arg(long, value_enum, ignore_case = true, default_value_t = FrontierBackendType::default())]
pub frontier_backend_type: FrontierBackendType,

/// Sets the SQL backend's pool size.
#[arg(long, default_value = "100")]
pub frontier_sql_backend_pool_size: u32,

/// Sets the SQL backend's query timeout in number of VM ops.
#[arg(long, default_value = "10000000")]
pub frontier_sql_backend_num_ops_timeout: u32,

/// Sets the SQL backend's auxiliary thread limit.
#[arg(long, default_value = "4")]
pub frontier_sql_backend_thread_count: u32,

/// Sets the SQL backend's cache size in bytes.
/// Default value is 200MB.
#[arg(long, default_value = "209715200")]
pub frontier_sql_backend_cache_size: u64,
}
impl EthArgs {
pub fn build_eth_rpc_config(&self) -> EthRpcConfig {
@@ -180,6 +213,11 @@ impl EthArgs {
eth_log_block_cache: self.eth_log_block_cache,
max_past_logs: self.max_past_logs,
fee_history_limit: self.fee_history_limit,
frontier_backend_type: self.frontier_backend_type,
frontier_sql_backend_pool_size: self.frontier_sql_backend_pool_size,
frontier_sql_backend_num_ops_timeout: self.frontier_sql_backend_num_ops_timeout,
frontier_sql_backend_thread_count: self.frontier_sql_backend_thread_count,
frontier_sql_backend_cache_size: self.frontier_sql_backend_cache_size,
}
}
}
@@ -212,4 +250,9 @@ pub struct EthRpcConfig {
pub eth_statuses_cache: usize,
pub fee_history_limit: u64,
pub max_past_logs: u32,
pub frontier_backend_type: FrontierBackendType,
pub frontier_sql_backend_pool_size: u32,
pub frontier_sql_backend_num_ops_timeout: u32,
pub frontier_sql_backend_thread_count: u32,
pub frontier_sql_backend_cache_size: u64,
}
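
The new `--frontier-backend-type` flag above relies on clap's `ValueEnum` derive together with `ignore_case` and `default_value_t`. The following standalone sketch (not part of this commit; the binary name and argument values are illustrative) shows how such a flag parses, assuming clap 4 with the `derive` feature:

```rust
use clap::{Parser, ValueEnum};

#[derive(Debug, Copy, Clone, Default, ValueEnum)]
enum FrontierBackendType {
    #[default]
    KeyValue,
    Sql,
}

#[derive(Debug, Parser)]
struct EthArgs {
    /// Frontier backend type; `ignore_case = true` accepts `sql`, `Sql`, `SQL`, ...
    #[arg(long, value_enum, ignore_case = true, default_value_t = FrontierBackendType::default())]
    frontier_backend_type: FrontierBackendType,
}

fn main() {
    // clap derives kebab-case value names: `KeyValue` -> `key-value`, `Sql` -> `sql`.
    let args = EthArgs::parse_from(["node", "--frontier-backend-type", "SQL"]);
    println!("{:?}", args.frontier_backend_type); // prints `Sql`
}
```

With no flag given, the derived default is the key-value backend, matching the `#[default]` variant in the diff above.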
155 changes: 50 additions & 105 deletions node/src/command.rs
@@ -25,21 +25,19 @@ use cumulus_primitives_core::ParaId;
// darwinia
use crate::{
chain_spec::*,
cli::{Cli, RelayChainCli, Subcommand},
cli::{Cli, FrontierBackendType, RelayChainCli, Subcommand},
frontier_service,
service::{self, *},
};
use dc_primitives::Block;
// frontier
use fc_db::frontier_database_dir;
// substrate
use sc_cli::{
CliConfiguration, DefaultConfigurationValues, ImportParams, KeystoreParams, NetworkParams,
Result, RuntimeVersion, SharedParams, SubstrateCli,
};
use sc_service::{
config::{BasePath, PrometheusConfig},
ChainSpec, DatabaseSource, PartialComponents,
ChainSpec, DatabaseSource,
};
use sp_core::{
crypto::{self, Ss58AddressFormatRegistry},
@@ -153,14 +151,10 @@ impl DefaultConfigurationValues for RelayChainCli {
30334
}

fn rpc_ws_listen_port() -> u16 {
fn rpc_listen_port() -> u16 {
9945
}

fn rpc_http_listen_port() -> u16 {
9934
}

fn prometheus_listen_port() -> u16 {
9616
}
@@ -186,16 +180,8 @@ impl CliConfiguration<Self> for RelayChainCli {
Ok(self.shared_params().base_path()?.or_else(|| self.base_path.clone().map(Into::into)))
}

fn rpc_http(&self, default_listen_port: u16) -> Result<Option<SocketAddr>> {
self.base.base.rpc_http(default_listen_port)
}

fn rpc_ipc(&self) -> Result<Option<String>> {
self.base.base.rpc_ipc()
}

fn rpc_ws(&self, default_listen_port: u16) -> Result<Option<SocketAddr>> {
self.base.base.rpc_ws(default_listen_port)
fn rpc_addr(&self, default_listen_port: u16) -> Result<Option<SocketAddr>> {
self.base.base.rpc_addr(default_listen_port)
}

fn prometheus_config(
@@ -241,8 +227,8 @@ impl CliConfiguration<Self> for RelayChainCli {
self.base.base.rpc_methods()
}

fn rpc_ws_max_connections(&self) -> Result<Option<usize>> {
self.base.base.rpc_ws_max_connections()
fn rpc_max_connections(&self) -> Result<u32> {
self.base.base.rpc_max_connections()
}

fn rpc_cors(&self, is_dev: bool) -> Result<Option<Vec<String>>> {
@@ -341,10 +327,7 @@ pub fn run() -> Result<()> {
#[cfg(feature = "crab-native")]
if chain_spec.is_crab() {
return runner.async_run(|$config| {
let $components = service::new_partial::<
CrabRuntimeApi,
CrabRuntimeExecutor,
>(
let $components = service::new_partial::<CrabRuntimeApi, CrabRuntimeExecutor>(
&$config,
&$cli.eth_args.build_eth_rpc_config()
)?;
@@ -357,10 +340,7 @@ pub fn run() -> Result<()> {
#[cfg(feature = "darwinia-native")]
if chain_spec.is_darwinia() {
return runner.async_run(|$config| {
let $components = service::new_partial::<
DarwiniaRuntimeApi,
DarwiniaRuntimeExecutor,
>(
let $components = service::new_partial::<DarwiniaRuntimeApi, DarwiniaRuntimeExecutor>(
&$config,
&$cli.eth_args.build_eth_rpc_config()
)?;
@@ -373,10 +353,7 @@ pub fn run() -> Result<()> {
#[cfg(feature = "pangolin-native")]
if chain_spec.is_pangolin() {
return runner.async_run(|$config| {
let $components = service::new_partial::<
PangolinRuntimeApi,
PangolinRuntimeExecutor,
>(
let $components = service::new_partial::<PangolinRuntimeApi, PangolinRuntimeExecutor>(
&$config,
&$cli.eth_args.build_eth_rpc_config()
)?;
@@ -389,10 +366,7 @@ pub fn run() -> Result<()> {
#[cfg(feature = "pangoro-native")]
if chain_spec.is_pangoro() {
return runner.async_run(|$config| {
let $components = service::new_partial::<
PangoroRuntimeApi,
PangoroRuntimeExecutor,
>(
let $components = service::new_partial::<PangoroRuntimeApi, PangoroRuntimeExecutor>(
&$config,
&$cli.eth_args.build_eth_rpc_config()
)?;
@@ -447,22 +421,48 @@ pub fn run() -> Result<()> {

set_default_ss58_version(chain_spec);
runner.sync_run(|config| {
// Remove Frontier DB.
// Remove Frontier off-chain db
let db_config_dir = frontier_service::db_config_dir(&config);
let frontier_database_config = match config.database {
DatabaseSource::RocksDb { .. } => DatabaseSource::RocksDb {
path: frontier_database_dir(&db_config_dir, "db"),
cache_size: 0,
},
DatabaseSource::ParityDb { .. } => DatabaseSource::ParityDb {
path: frontier_database_dir(&db_config_dir, "paritydb"),
},
_ =>
return Err(format!("Cannot purge `{:?}` database", config.database).into()),
match cli.eth_args.frontier_backend_type {
FrontierBackendType::KeyValue => {
let frontier_database_config = match config.database {
DatabaseSource::RocksDb { .. } => DatabaseSource::RocksDb {
path: fc_db::kv::frontier_database_dir(&db_config_dir, "db"),
cache_size: 0,
},
DatabaseSource::ParityDb { .. } => DatabaseSource::ParityDb {
path: fc_db::kv::frontier_database_dir(&db_config_dir, "paritydb"),
},
_ => {
return Err(format!(
"Cannot purge `{:?}` database",
config.database
)
.into())
}
};
cmd.base.run(frontier_database_config)?;
}
FrontierBackendType::Sql => {
let db_path = db_config_dir.join("sql");
match std::fs::remove_dir_all(&db_path) {
Ok(_) => {
println!("{:?} removed.", &db_path);
}
Err(ref err) if err.kind() == std::io::ErrorKind::NotFound => {
eprintln!("{:?} did not exist.", &db_path);
}
Err(err) => {
return Err(format!(
"Cannot purge `{:?}` database: {:?}",
db_path, err,
)
.into())
}
};
}
};

cmd.base.run(frontier_database_config)?;

let polkadot_cli = RelayChainCli::new(
&config,
[RelayChainCli::executable_name()].iter().chain(cli.relay_chain_args.iter()),
@@ -498,61 +498,6 @@ pub fn run() -> Result<()> {
cmd.run(&*spec)
})
},
Some(Subcommand::FrontierDb(cmd)) => {
let runner = cli.create_runner(cmd)?;

runner.sync_run(|config| {
let chain_spec = &config.chain_spec;

set_default_ss58_version(chain_spec);

#[cfg(feature = "crab-native")]
if chain_spec.is_crab() {
let PartialComponents { client, other: (frontier_backend, ..), .. } =
service::new_partial::<CrabRuntimeApi, CrabRuntimeExecutor>(
&config,
&cli.eth_args.build_eth_rpc_config(),
)?;

return cmd.run::<_, dc_primitives::Block>(client, frontier_backend);
}

#[cfg(feature = "darwinia-native")]
if chain_spec.is_darwinia() {
let PartialComponents { client, other: (frontier_backend, ..), .. } =
service::new_partial::<DarwiniaRuntimeApi, DarwiniaRuntimeExecutor>(
&config,
&cli.eth_args.build_eth_rpc_config(),
)?;

return cmd.run::<_, dc_primitives::Block>(client, frontier_backend);
}

#[cfg(feature = "pangolin-native")]
if chain_spec.is_pangolin() {
let PartialComponents { client, other: (frontier_backend, ..), .. } =
service::new_partial::<PangolinRuntimeApi, PangolinRuntimeExecutor>(
&config,
&cli.eth_args.build_eth_rpc_config(),
)?;

return cmd.run::<_, dc_primitives::Block>(client, frontier_backend);
}

#[cfg(feature = "pangoro-native")]
if chain_spec.is_pangoro() {
let PartialComponents { client, other: (frontier_backend, ..), .. } =
service::new_partial::<PangoroRuntimeApi, PangoroRuntimeExecutor>(
&config,
&cli.eth_args.build_eth_rpc_config(),
)?;

return cmd.run::<_, dc_primitives::Block>(client, frontier_backend);
}

panic!("No feature(crab-native, darwinia-native, pangolin-native, pangoro-native) is enabled!");
})
},
#[cfg(feature = "runtime-benchmarks")]
Some(Subcommand::Benchmark(cmd)) => {
// substrate

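For reference, here is a standalone sketch of the SQL purge path that `purge-chain` gains in the node/src/command.rs diff above: the `sql` subdirectory under the Frontier database directory is removed, and a missing directory is tolerated rather than treated as an error. The helper name and the path used in `main` are illustrative, not part of the commit.

```rust
use std::{io::ErrorKind, path::Path};

// Mirrors the `FrontierBackendType::Sql` branch of the purge-chain handling
// shown above; `db_config_dir` would normally come from the node's config.
fn purge_sql_backend(db_config_dir: &Path) -> Result<(), String> {
    let db_path = db_config_dir.join("sql");
    match std::fs::remove_dir_all(&db_path) {
        Ok(()) => {
            println!("{:?} removed.", db_path);
            Ok(())
        }
        // A missing directory is not an error: there is simply nothing to purge.
        Err(err) if err.kind() == ErrorKind::NotFound => {
            eprintln!("{:?} did not exist.", db_path);
            Ok(())
        }
        Err(err) => Err(format!("Cannot purge `{:?}` database: {:?}", db_path, err)),
    }
}

fn main() {
    // Illustrative path; a real node derives this from its base path and chain id.
    purge_sql_backend(Path::new("/tmp/frontier-db-example")).unwrap();
}
```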