Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Bump Frontier version #1597

Merged
merged 1 commit on Sep 23, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
48 changes: 24 additions & 24 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

24 changes: 12 additions & 12 deletions node/src/service/frontier.rs
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ pub fn spawn_tasks<B, BE, C>(
task_manager: &TaskManager,
client: Arc<C>,
backend: Arc<BE>,
frontier_backend: fc_db::Backend<B>,
frontier_backend: Arc<fc_db::Backend<B, C>>,
filter_pool: Option<FilterPool>,
overrides: Arc<OverrideHandle<B>>,
fee_history_cache: FeeHistoryCache,
Expand All @@ -79,7 +79,7 @@ where
BE: 'static + sc_client_api::backend::Backend<B>,
BE::State: sc_client_api::backend::StateBackend<Hashing>,
{
match frontier_backend.clone() {
match &*frontier_backend {
fc_db::Backend::KeyValue(bd) => {
task_manager.spawn_essential_handle().spawn(
"frontier-mapping-sync-worker",
Expand All @@ -90,7 +90,7 @@ where
client.clone(),
backend.clone(),
overrides.clone(),
Arc::new(bd),
bd.clone(),
3,
0,
fc_mapping_sync::SyncStrategy::Parachain,
Expand All @@ -107,10 +107,10 @@ where
fc_mapping_sync::sql::SyncWorker::run(
client.clone(),
backend.clone(),
Arc::new(bd),
bd.clone(),
client.import_notification_stream(),
fc_mapping_sync::sql::SyncWorkerConfig {
read_notification_timeout: Duration::from_secs(10),
read_notification_timeout: Duration::from_secs(30),
check_indexed_blocks_interval: Duration::from_secs(60),
},
fc_mapping_sync::SyncStrategy::Parachain,
Expand Down Expand Up @@ -168,9 +168,9 @@ where
let (debug_task, debug_requester) = DebugHandler::task(
Arc::clone(&client),
Arc::clone(&backend),
match frontier_backend {
fc_db::Backend::KeyValue(bd) => Arc::new(bd),
fc_db::Backend::Sql(bd) => Arc::new(bd),
match &*frontier_backend {
fc_db::Backend::KeyValue(bd) => bd.clone(),
fc_db::Backend::Sql(bd) => bd.clone(),
},
Arc::clone(&permit_pool),
Arc::clone(&overrides),
Expand Down Expand Up @@ -216,7 +216,7 @@ pub(crate) fn backend<B, BE, C>(
client: Arc<C>,
config: &sc_service::Configuration,
eth_rpc_config: EthRpcConfig,
) -> Result<fc_db::Backend<B>, String>
) -> Result<fc_db::Backend<B, C>, String>
where
B: 'static + sp_runtime::traits::Block<Hash = Hash>,
BE: 'static + sc_client_api::backend::Backend<B>,
Expand All @@ -229,9 +229,9 @@ where
let db_config_dir = db_config_dir(config);
let overrides = fc_storage::overrides_handle(client.clone());
match eth_rpc_config.frontier_backend_type {
FrontierBackendType::KeyValue => Ok(fc_db::Backend::<B>::KeyValue(
FrontierBackendType::KeyValue => Ok(fc_db::Backend::<B, C>::KeyValue(Arc::new(
fc_db::kv::Backend::open(Arc::clone(&client), &config.database, &db_config_dir)?,
)),
))),
FrontierBackendType::Sql => {
let db_path = db_config_dir.join("sql");
std::fs::create_dir_all(&db_path).expect("failed creating sql db directory");
Expand All @@ -251,7 +251,7 @@ where
overrides,
))
.unwrap_or_else(|err| panic!("failed creating sql backend: {:?}", err));
Ok(fc_db::Backend::<B>::Sql(backend))
Ok(fc_db::Backend::<B, C>::Sql(Arc::new(backend)))
},
}
}
17 changes: 9 additions & 8 deletions node/src/service/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ type Service<RuntimeApi> = sc_service::PartialComponents<
sc_consensus::DefaultImportQueue<Block>,
sc_transaction_pool::FullPool<Block, FullClient<RuntimeApi>>,
(
fc_db::Backend<Block>,
fc_db::Backend<Block, FullClient<RuntimeApi>>,
Option<fc_rpc_core::types::FilterPool>,
fc_rpc_core::types::FeeHistoryCache,
fc_rpc_core::types::FeeHistoryCacheLimit,
Expand Down Expand Up @@ -308,6 +308,7 @@ where
)
.await
.map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?;
let frontier_backend = Arc::new(frontier_backend);
let validator = parachain_config.role.is_authority();
let prometheus_registry = parachain_config.prometheus_registry().cloned();
let import_queue_service = import_queue.service();
Expand Down Expand Up @@ -382,7 +383,6 @@ where
let pool = transaction_pool.clone();
let network = network.clone();
let filter_pool = filter_pool.clone();
let frontier_backend = frontier_backend.clone();
let overrides = overrides;
let fee_history_cache = fee_history_cache.clone();
let max_past_logs = eth_rpc_config.max_past_logs;
Expand Down Expand Up @@ -411,9 +411,9 @@ where
network: network.clone(),
sync: sync_service.clone(),
filter_pool: filter_pool.clone(),
frontier_backend: match frontier_backend.clone() {
fc_db::Backend::KeyValue(bd) => Arc::new(bd),
fc_db::Backend::Sql(bd) => Arc::new(bd),
frontier_backend: match &*frontier_backend {
fc_db::Backend::KeyValue(bd) => bd.clone(),
fc_db::Backend::Sql(bd) => bd.clone(),
},
max_past_logs,
fee_history_cache: fee_history_cache.clone(),
Expand Down Expand Up @@ -754,6 +754,7 @@ where
);
}

let frontier_backend = Arc::new(frontier_backend);
let force_authoring = config.force_authoring;
let backoff_authoring_blocks = None::<()>;
let slot_duration = sc_consensus_aura::slot_duration(&*client)?;
Expand Down Expand Up @@ -917,9 +918,9 @@ where
network: network.clone(),
sync: sync_service.clone(),
filter_pool: filter_pool.clone(),
frontier_backend: match frontier_backend.clone() {
fc_db::Backend::KeyValue(bd) => Arc::new(bd),
fc_db::Backend::Sql(bd) => Arc::new(bd),
frontier_backend: match &*frontier_backend {
fc_db::Backend::KeyValue(bd) => bd.clone(),
fc_db::Backend::Sql(bd) => bd.clone(),
},
max_past_logs,
fee_history_cache: fee_history_cache.clone(),
Expand Down