diff --git a/chain/mock/src/mock_chain.rs b/chain/mock/src/mock_chain.rs index c11e8c9766..4032073f11 100644 --- a/chain/mock/src/mock_chain.rs +++ b/chain/mock/src/mock_chain.rs @@ -4,6 +4,7 @@ use anyhow::{format_err, Result}; use starcoin_account_api::AccountInfo; use starcoin_chain::{BlockChain, ChainReader, ChainWriter}; +use starcoin_config::miner_config::G_MAX_PARENTS_COUNT; use starcoin_config::ChainNetwork; use starcoin_consensus::Consensus; use starcoin_crypto::HashValue; @@ -266,6 +267,7 @@ impl MockChain { prevous_ghostdata.as_ref(), 4, 3, + G_MAX_PARENTS_COUNT, )?; debug!( diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 4b72772df0..92875a0a0c 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -375,6 +375,7 @@ impl BlockChain { )?; let excluded_txns = opened_block.push_txns(user_txns)?; let template = opened_block.finalize()?; + Ok((template, excluded_txns)) } @@ -1381,7 +1382,20 @@ impl ChainReader for BlockChain { uncles: &[BlockHeader], header: &BlockHeader, ) -> Result { - Ok(self.dag().verify_and_ghostdata(uncles, header)?) + let latest_pruning_point = { + match self.storage.get_startup_info().unwrap_or(None) { + Some(startup_info) => self + .storage + .get_block_header_by_hash(startup_info.main) + .unwrap_or(None) + .map(|head_block| head_block.pruning_point()), + None => None, + } + }; + + Ok(self + .dag() + .verify_and_ghostdata(uncles, header, latest_pruning_point)?) 
} fn is_dag_ancestor_of(&self, ancestor: HashValue, descendant: HashValue) -> Result { diff --git a/config/src/lib.rs b/config/src/lib.rs index 491edce6b9..3bd5815186 100644 --- a/config/src/lib.rs +++ b/config/src/lib.rs @@ -33,7 +33,7 @@ pub mod genesis_config; mod helper; mod logger_config; mod metrics_config; -mod miner_config; +pub mod miner_config; mod network_config; mod rpc_config; mod storage_config; diff --git a/config/src/miner_config.rs b/config/src/miner_config.rs index 582042259c..1411905452 100644 --- a/config/src/miner_config.rs +++ b/config/src/miner_config.rs @@ -7,6 +7,8 @@ use clap::Parser; use serde::{Deserialize, Serialize}; use std::sync::Arc; +pub static G_MAX_PARENTS_COUNT: u64 = 16; + #[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize, Parser)] #[serde(deny_unknown_fields)] pub struct MinerConfig { @@ -34,6 +36,10 @@ pub struct MinerConfig { #[serde(skip)] #[clap(skip)] base: Option>, + + #[serde(skip_serializing_if = "Option::is_none")] + #[clap(long = "maximum-parents-count")] + pub maximum_parents_count: Option, } impl MinerConfig { @@ -60,6 +66,10 @@ impl MinerConfig { enable_stderr: true, }) } + + pub fn maximum_parents_count(&self) -> u64 { + self.maximum_parents_count.unwrap_or(G_MAX_PARENTS_COUNT) + } } #[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] @@ -103,6 +113,10 @@ impl ConfigModule for MinerConfig { self.block_gas_limit = opt.miner.block_gas_limit; } + if opt.miner.maximum_parents_count.is_some() { + self.maximum_parents_count = opt.miner.maximum_parents_count; + } + Ok(()) } } diff --git a/config/src/sync_config.rs b/config/src/sync_config.rs index dc23345229..1a7e5c6642 100644 --- a/config/src/sync_config.rs +++ b/config/src/sync_config.rs @@ -28,6 +28,18 @@ pub struct SyncConfig { help = "max retry times once sync block failed, default 15." 
)] max_retry_times: Option, + + /// the maximum gap between the current head block's number and the peer's block's number + /// and if the block height broadcast by a peer node is greater than the height of the local head block by this maximum value, + /// a regular sync process will be initiated; + /// otherwise, a lightweight sync process will be triggered, strengthening the reference relationship between nodes. + #[serde(skip_serializing_if = "Option::is_none")] + #[clap( + name = "lightweight-sync-max-gap", + long, + help = "The height difference threshold for triggering a lightweight sync." + )] + lightweight_sync_max_gap: Option, } impl SyncConfig { @@ -38,6 +50,10 @@ impl SyncConfig { pub fn max_retry_times(&self) -> u64 { self.max_retry_times.unwrap_or(15) } + + pub fn lightweight_sync_max_gap(&self) -> Option { + self.lightweight_sync_max_gap + } } impl ConfigModule for SyncConfig { @@ -50,6 +66,10 @@ impl ConfigModule for SyncConfig { self.max_retry_times = opt.sync.max_retry_times; } + if opt.sync.lightweight_sync_max_gap.is_some() { + self.lightweight_sync_max_gap = opt.sync.lightweight_sync_max_gap; + } + Ok(()) } } diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index 6ee2cf2404..a783118447 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -19,6 +19,7 @@ use crate::process_key_already_error; use crate::prune::pruning_point_manager::PruningPointManagerT; use crate::reachability::ReachabilityError; use anyhow::{bail, ensure, Ok}; +use itertools::Itertools; use parking_lot::Mutex; use rocksdb::WriteBatch; use starcoin_config::temp_dir; @@ -102,8 +103,12 @@ impl BlockDAG { } pub fn has_block_connected(&self, block_header: &BlockHeader) -> anyhow::Result { - let _ghostdata = match self.storage.ghost_dag_store.get_data(block_header.id()) { - std::result::Result::Ok(data) => data, + match self.storage.ghost_dag_store.has(block_header.id()) { + std::result::Result::Ok(true) => (), + std::result::Result::Ok(false) => { + 
warn!("failed to get ghostdata by hash, the block should be re-executed",); + return anyhow::Result::Ok(false); + } Err(e) => { warn!( "failed to get ghostdata by hash: {:?}, the block should be re-executed", @@ -113,8 +118,12 @@ impl BlockDAG { } }; - let _dag_header = match self.storage.header_store.get_header(block_header.id()) { - std::result::Result::Ok(header) => header, + match self.storage.header_store.has(block_header.id()) { + std::result::Result::Ok(true) => (), + std::result::Result::Ok(false) => { + warn!("failed to get header by hash, the block should be re-executed",); + return anyhow::Result::Ok(false); + } Err(e) => { warn!( "failed to get header by hash: {:?}, the block should be re-executed", @@ -196,6 +205,9 @@ impl BlockDAG { } pub fn check_ancestor_of(&self, ancestor: Hash, descendant: Hash) -> anyhow::Result { + if ancestor == Hash::zero() { + return Ok(true); + } inquirer::is_dag_ancestor_of( &*self.storage.reachability_store.read(), ancestor, @@ -308,8 +320,21 @@ impl BlockDAG { } }; - if self.storage.reachability_store.read().get_reindex_root()? != header.pruning_point() - && header.pruning_point() != HashValue::zero() + if header.pruning_point() == HashValue::zero() { + info!( + "try to hint virtual selected parent, root index: {:?}", + self.storage.reachability_store.read().get_reindex_root() + ); + let _ = inquirer::hint_virtual_selected_parent( + self.storage.reachability_store.write().deref_mut(), + header.parent_hash(), + ); + info!( + "after hint virtual selected parent, root index: {:?}", + self.storage.reachability_store.read().get_reindex_root() + ); + } else if self.storage.reachability_store.read().get_reindex_root()? 
+ != header.pruning_point() && self .storage .reachability_store @@ -320,13 +345,14 @@ impl BlockDAG { "try to hint virtual selected parent, root index: {:?}", self.storage.reachability_store.read().get_reindex_root() ); - inquirer::hint_virtual_selected_parent( + let hint_result = inquirer::hint_virtual_selected_parent( self.storage.reachability_store.write().deref_mut(), header.pruning_point(), - )?; + ); info!( - "after hint virtual selected parent, root index: {:?}", - self.storage.reachability_store.read().get_reindex_root() + "after hint virtual selected parent, root index: {:?}, hint result: {:?}", + self.storage.reachability_store.read().get_reindex_root(), + hint_result ); } @@ -429,11 +455,6 @@ impl BlockDAG { // Generate ghostdag data let parents = header.parents(); - debug!( - "start to get the ghost data from block: {:?}, number: {:?}", - header.id(), - header.number() - ); let ghostdata = match self.ghostdata_by_hash(header.id())? { None => { // It must be the dag genesis if header is a format for a single chain @@ -459,13 +480,14 @@ impl BlockDAG { "try to hint virtual selected parent, root index: {:?}", self.storage.reachability_store.read().get_reindex_root() ); - inquirer::hint_virtual_selected_parent( + let hint_result = inquirer::hint_virtual_selected_parent( self.storage.reachability_store.write().deref_mut(), - header.pruning_point(), - )?; + header.parent_hash(), + ); info!( - "after hint virtual selected parent, root index: {:?}", - self.storage.reachability_store.read().get_reindex_root() + "after hint virtual selected parent, root index: {:?}, hint result: {:?}", + self.storage.reachability_store.read().get_reindex_root(), + hint_result ); } @@ -629,14 +651,48 @@ impl BlockDAG { previous_ghostdata: &GhostdagData, pruning_depth: u64, pruning_finality: u64, + max_parents_count: u64, ) -> anyhow::Result { - info!("start to calculate the mergeset and tips, previous pruning point: {:?}, previous ghostdata: {:?} and its red block count: {:?}", 
previous_pruning_point, previous_ghostdata.to_compact(), previous_ghostdata.mergeset_reds.len()); - let dag_state = self.get_dag_state(previous_pruning_point)?; + let mut dag_state = self.get_dag_state(previous_pruning_point)?; + + // filter + if dag_state.tips.len() > max_parents_count as usize { + dag_state.tips = dag_state + .tips + .into_iter() + .sorted_by(|a, b| { + let a_blue_work = self + .storage + .ghost_dag_store + .get_blue_work(*a) + .unwrap_or_else(|e| { + panic!( + "the ghostdag data should be existed for {:?}, e: {:?}", + a, e + ) + }); + let b_blue_work = self + .storage + .ghost_dag_store + .get_blue_work(*b) + .unwrap_or_else(|e| { + panic!( + "the ghostdag data should be existed for {:?}, e: {:?}", + b, e + ) + }); + if a_blue_work == b_blue_work { + a.cmp(b) + } else { + b_blue_work.cmp(&a_blue_work) + } + }) + .take(max_parents_count as usize) + .collect(); + } + let next_ghostdata = self.ghostdata(&dag_state.tips)?; - info!( - "start to calculate the mergeset and tips for tips: {:?}, and last pruning point: {:?} and next ghostdata: {:?}, red block count: {:?}", - dag_state.tips, previous_pruning_point, next_ghostdata.to_compact(), next_ghostdata.mergeset_reds.len() - ); + let next_pruning_point = self.pruning_point_manager().next_pruning_point( previous_pruning_point, previous_ghostdata, @@ -644,11 +700,12 @@ impl BlockDAG { pruning_depth, pruning_finality, )?; - info!( - "the next pruning point is: {:?}, and the previous pruning point is: {:?}", - next_pruning_point, previous_pruning_point - ); + if next_pruning_point == Hash::zero() || next_pruning_point == previous_pruning_point { + info!( + "tips: {:?}, the next pruning point is: {:?}, the current ghostdata's selected parent: {:?}, blue blocks are: {:?} and its red blocks are: {:?}", + dag_state.tips, next_pruning_point, next_ghostdata.selected_parent, next_ghostdata.mergeset_blues, next_ghostdata.mergeset_reds.len(), + ); anyhow::Ok(MineNewDagBlockInfo { tips: dag_state.tips, 
blue_blocks: (*next_ghostdata.mergeset_blues).clone(), @@ -660,15 +717,11 @@ impl BlockDAG { previous_pruning_point, next_pruning_point, )?; - let mergeset_blues = (*self - .ghost_dag_manager() - .ghostdag(&pruned_tips)? - .mergeset_blues) - .clone(); + let pruned_ghostdata = self.ghost_dag_manager().ghostdag(&pruned_tips)?; + let mergeset_blues = pruned_ghostdata.mergeset_blues.as_ref().clone(); info!( - "previous tips are: {:?}, the pruned tips are: {:?}, the mergeset blues are: {:?}, the next pruning point is: {:?}", - dag_state.tips, - pruned_tips, mergeset_blues, next_pruning_point + "the pruning was triggered, before pruning, the tips: {:?}, after pruning tips: {:?}, the next pruning point is: {:?}, the current ghostdata's selected parent: {:?}, blue blocks are: {:?} and its red blocks are: {:?}", + pruned_tips, dag_state.tips, next_pruning_point, pruned_ghostdata.selected_parent, pruned_ghostdata.mergeset_blues, pruned_ghostdata.mergeset_reds.len(), ); anyhow::Ok(MineNewDagBlockInfo { tips: pruned_tips, @@ -711,18 +764,73 @@ impl BlockDAG { self.pruning_point_manager().reachability_service() } + // Returning true means the block processing will go into the single-chain logic, + // i.e. this is a historical block that converges to the next pruning point. + // Returning false means it will go into the ghost logic, + // i.e. this is a new block that will be added by the ghost protocol, which enhances the parallelism of block processing. + // For vega, the special situation is: + // the pruning logic was delivered after vega had been running for a long time, so historical blocks will be processed by the single-chain logic. 
+ fn check_historical_block( + &self, + header: &BlockHeader, + latest_pruning_point: Option, + ) -> Result { + if let Some(pruning_point) = latest_pruning_point { + match ( + header.pruning_point() == HashValue::zero(), + pruning_point == HashValue::zero(), + ) { + (true, true) => { + if header.chain_id().is_vega() { + Ok(true) + } else { + Ok(false) + } + } + (true, false) => Ok(true), + (false, true) => Ok(false), + (false, false) => { + if header.pruning_point() == pruning_point { + Ok(false) + } else if self.check_ancestor_of(header.pruning_point(), pruning_point)? { + Ok(true) + } else { + Ok(false) + } + } + } + } else { + Ok(false) + } + } + pub fn verify_and_ghostdata( &self, blue_blocks: &[BlockHeader], header: &BlockHeader, + latest_pruning_point: Option, ) -> Result { - if header.pruning_point() != HashValue::zero() { - self.ghost_dag_manager().ghostdag(&header.parents()) - } else { + info!( + "checking historical block: header pruning point: {:?}, latest pruning point: {:?}", + header.pruning_point(), + latest_pruning_point + ); + if self.check_historical_block(header, latest_pruning_point)? 
{ + info!( + "the block is a historical block, the header id: {:?}", + header.id() + ); self.ghost_dag_manager() .verify_and_ghostdata(blue_blocks, header) + } else { + info!( + "the block is not a historical block, the header id: {:?}", + header.id() + ); + self.ghost_dag_manager().ghostdag(&header.parents()) } } + pub fn check_upgrade(&self, main: &BlockHeader, genesis_id: HashValue) -> anyhow::Result<()> { // set the state with key 0 if main.version() == 0 || main.version() == 1 { diff --git a/flexidag/src/consensusdb/db.rs b/flexidag/src/consensusdb/db.rs index 72632d11db..96c846c534 100644 --- a/flexidag/src/consensusdb/db.rs +++ b/flexidag/src/consensusdb/db.rs @@ -104,7 +104,7 @@ impl FlexiDagStorage { } pub fn write_batch(&self, batch: WriteBatch) -> Result<(), StoreError> { - self.db.raw_write_batch_sync(batch).map_err(|e| { + self.db.raw_write_batch(batch).map_err(|e| { StoreError::DBIoError(format!( "failed to write in batch for dag data, error: {:?}", e.to_string() diff --git a/flexidag/src/ghostdag/protocol.rs b/flexidag/src/ghostdag/protocol.rs index 435b09b421..72ad2006b2 100644 --- a/flexidag/src/ghostdag/protocol.rs +++ b/flexidag/src/ghostdag/protocol.rs @@ -485,6 +485,10 @@ impl< }); Ok(sorted_blocks) } + + pub fn k(&self) -> KType { + self.k + } } /// Chain block with attached ghostdag data diff --git a/flexidag/src/prune/pruning_point_manager.rs b/flexidag/src/prune/pruning_point_manager.rs index a81597cf69..8cd91ad397 100644 --- a/flexidag/src/prune/pruning_point_manager.rs +++ b/flexidag/src/prune/pruning_point_manager.rs @@ -1,5 +1,5 @@ use starcoin_crypto::HashValue; -use starcoin_logger::prelude::{debug, info}; +use starcoin_logger::prelude::info; use crate::reachability::reachability_service::ReachabilityService; use crate::{ @@ -42,6 +42,9 @@ impl PruningPointManagerT { current_pruning_point: HashValue, next_pruning_point: HashValue, ) -> anyhow::Result> { + if next_pruning_point == HashValue::zero() { + return 
Ok(dag_state.tips.clone()); + } if current_pruning_point == next_pruning_point { return Ok(dag_state.tips.clone()); } @@ -66,20 +69,14 @@ impl PruningPointManagerT { pruning_depth: u64, pruning_finality: u64, ) -> anyhow::Result { + info!( + "previous_pruning_point: {:?}, previous_ghostdata: {:?}, next_ghostdata: {:?}, pruning_depth: {}, pruning_finality: {}", + previous_pruning_point, previous_ghostdata.to_compact(), next_ghostdata.to_compact(), pruning_depth, pruning_finality + ); let min_required_blue_score_for_next_pruning_point = (self.finality_score(previous_ghostdata.blue_score, pruning_finality) + 1) * pruning_finality; - debug!( - "min_required_blue_score_for_next_pruning_point: {:?}", - min_required_blue_score_for_next_pruning_point - ); - - debug!("previous_pruning_point: {:?}, previous_ghostdata: {:?}, next_ghostdata: {:?}, pruning_depth: {:?}, pruning_finality: {:?}", - previous_pruning_point, previous_ghostdata, next_ghostdata, - pruning_depth, pruning_finality, - ); - let mut latest_pruning_ghost_data = previous_ghostdata.to_compact(); if min_required_blue_score_for_next_pruning_point + pruning_depth <= next_ghostdata.blue_score @@ -90,10 +87,6 @@ impl PruningPointManagerT { true, ) { let next_pruning_ghostdata = self.ghost_dag_store.get_data(child)?; - debug!( - "child: {:?}, observer2.blue_score: {:?}, next_pruning_ghostdata.blue_score: {:?}", - child, next_ghostdata.blue_score, next_pruning_ghostdata.blue_score - ); if next_ghostdata.blue_score - next_pruning_ghostdata.blue_score < pruning_depth { break; } @@ -114,7 +107,12 @@ impl PruningPointManagerT { if latest_pruning_ghost_data.selected_parent == previous_ghostdata.to_compact().selected_parent { - anyhow::Ok(HashValue::zero()) // still genesis + // anyhow::Ok(HashValue::zero()) // still genesis + if previous_pruning_point == HashValue::zero() { + anyhow::Ok(HashValue::zero()) + } else { + anyhow::Ok(previous_ghostdata.to_compact().selected_parent) + } } else { 
anyhow::Ok(latest_pruning_ghost_data.selected_parent) } diff --git a/flexidag/tests/tests.rs b/flexidag/tests/tests.rs index d9ddde5b34..d08dbcb63f 100644 --- a/flexidag/tests/tests.rs +++ b/flexidag/tests/tests.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use anyhow::{bail, format_err, Ok, Result}; +use starcoin_config::miner_config::G_MAX_PARENTS_COUNT; use starcoin_crypto::HashValue as Hash; use starcoin_dag::{ blockdag::{BlockDAG, MineNewDagBlockInfo}, @@ -1028,6 +1029,7 @@ fn test_prune() -> anyhow::Result<()> { previous_ghostdata.as_ref(), pruning_depth, pruning_finality, + G_MAX_PARENTS_COUNT, )?; assert_eq!(pruning_point, block_main_2.id()); @@ -1056,6 +1058,7 @@ fn test_prune() -> anyhow::Result<()> { previous_ghostdata.as_ref(), pruning_depth, pruning_finality, + G_MAX_PARENTS_COUNT, )?; assert_eq!(pruning_point, block_main_2.id()); diff --git a/sync/src/block_connector/block_connector_service.rs b/sync/src/block_connector/block_connector_service.rs index df3162e8ff..e3e65151bc 100644 --- a/sync/src/block_connector/block_connector_service.rs +++ b/sync/src/block_connector/block_connector_service.rs @@ -16,7 +16,6 @@ use anyhow::{bail, format_err, Ok, Result}; use network_api::PeerProvider; use starcoin_chain::BlockChain; use starcoin_chain_api::{ChainReader, ConnectBlockError, WriteableChainService}; -use starcoin_config::genesis_config::G_BASE_MAX_UNCLES_PER_BLOCK; use starcoin_config::{NodeConfig, G_CRATE_VERSION}; use starcoin_consensus::Consensus; use starcoin_crypto::HashValue; @@ -137,10 +136,20 @@ where return false; } let gap = current_number.saturating_sub(block_header.number()); - if gap <= G_BASE_MAX_UNCLES_PER_BLOCK.saturating_mul(2) { - return true; - } - false + let k = self.chain_service.get_dag().ghost_dag_manager().k() as u64; + let config_gap = self + .config + .sync + .lightweight_sync_max_gap() + .map_or(k.saturating_mul(2), |max_gap| max_gap); + debug!( + "is-near-block: current_number: {:?}, block_number: {:?}, gap: 
{:?}, config_gap: {:?}", + current_number, + block_header.number(), + gap, + config_gap + ); + gap <= config_gap } } @@ -444,6 +453,7 @@ where previous_ghostdata.as_ref(), pruning_depth, pruning_finality, + self.config.miner.maximum_parents_count(), )? } else { let genesis = ctx.get_shared::()?;