Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add KZG commitment consistency check #6044

Closed
wants to merge 2 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 26 additions & 3 deletions beacon_node/beacon_chain/src/block_verification_types.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ use crate::data_column_verification::{
use crate::eth1_finalization_cache::Eth1FinalizationData;
use crate::{get_block_root, GossipVerifiedBlock, PayloadVerificationOutcome};
use derivative::Derivative;
use itertools::Itertools;
use ssz_types::VariableList;
use state_processing::ConsensusContext;
use std::fmt::{Debug, Formatter};
Expand Down Expand Up @@ -162,10 +163,32 @@ impl<E: EthSpec> RpcBlock<E> {
) -> Result<Self, AvailabilityCheckError> {
let block_root = block_root.unwrap_or_else(|| get_block_root(&block));

if block.num_expected_blobs() > 0 && custody_columns.is_empty() {
// The number of required custody columns is out of scope here.
return Err(AvailabilityCheckError::MissingCustodyColumns);
let column_commitments = custody_columns
.iter()
.map(|c| c.as_data_column().kzg_commitments.clone())
.unique()
.at_most_one()
.map_err(|_| AvailabilityCheckError::KzgCommitmentsInconsistent)?;

if let (Some(column_commitments), Ok(block_commitments)) = (
column_commitments,
block.message().body().blob_kzg_commitments(),
) {
if column_commitments.len() != block_commitments.len() {
return Err(AvailabilityCheckError::KzgCommitmentListMismatch);
}
for (&column_commitment, &block_commitment) in
column_commitments.iter().zip(block_commitments.iter())
{
if column_commitment != block_commitment {
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I actually want to remove this check for blob sidecars since the inclusion proof should ensure that this never happens. Have you observed this inconsistency issue on devnets?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah I actually ran into a few KZG errors during sync.

I think you raised a good point, and this PR shouldn't be required. The proper fix is to ensure that we verify the inclusion proof — I think we're missing this in sampling and in by-range RPC. I'll raise an issue.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Raised issue #6111. Closing this one.

return Err(AvailabilityCheckError::KzgCommitmentMismatch {
block_commitment,
blob_commitment: column_commitment,
});
}
}
}

// Treat empty data column lists as if they are missing.
let inner = if !custody_columns.is_empty() {
RpcBlockInner::BlockAndCustodyColumns(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,15 @@ pub enum Error {
Kzg(KzgError),
KzgNotInitialized,
KzgVerificationFailed,
/// `KzgCommitment` for blob does not match `KzgCommitments` from the block.
KzgCommitmentMismatch {
blob_commitment: KzgCommitment,
block_commitment: KzgCommitment,
},
/// `KzgCommitments` from data column sidecars do not match `KzgCommitments` from the block.
KzgCommitmentListMismatch,
/// `KzgCommitments` are not identical for all sidecars for the same block.
KzgCommitmentsInconsistent,
UnableToDetermineImportRequirement,
Unexpected,
SszTypes(ssz_types::Error),
Expand Down Expand Up @@ -52,6 +57,8 @@ impl Error {
| Error::BlobIndexInvalid(_)
| Error::DataColumnIndexInvalid(_)
| Error::KzgCommitmentMismatch { .. }
| Error::KzgCommitmentListMismatch
| Error::KzgCommitmentsInconsistent
| Error::KzgVerificationFailed => ErrorCategory::Malicious,
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -111,6 +111,13 @@ impl<E: EthSpec> PendingComponents<E> {
&mut self.verified_blobs
}

/// Returns a mutable reference to the cached (KZG-verified) custody data
/// columns, allowing callers to take or replace the whole list in place
/// (e.g. to re-validate columns after a block is inserted).
pub fn get_cached_data_columns_mut(
&mut self,
) -> &mut RuntimeVariableList<KzgVerifiedCustodyDataColumn<E>> {
&mut self.verified_data_columns
}

/// Checks if a blob exists at the given index in the cache.
///
/// Returns:
Expand Down Expand Up @@ -208,12 +215,26 @@ impl<E: EthSpec> PendingComponents<E> {
}

/// Merges a given set of data columns into the cache.
///
/// Data columns that don't match the new block's commitments are evicted.
/// NOTE: It is important that data columns are evicted instead of returning an
/// error on a mismatch, so invalid data columns don't prevent valid blocks from
/// getting inserted.
fn merge_data_columns<I: IntoIterator<Item = KzgVerifiedCustodyDataColumn<E>>>(
&mut self,
kzg_verified_data_columns: I,
) -> Result<(), AvailabilityCheckError> {
let maybe_block_commitments = self
.get_cached_block()
.as_ref()
.map(|b| b.get_commitments());

for data_column in kzg_verified_data_columns {
// TODO(das): Add equivalent checks for data columns if necessary
if let Some(ref block_commitments) = maybe_block_commitments {
if data_column.as_data_column().kzg_commitments != *block_commitments {
continue;
}
}
if !self.data_column_exists(data_column.index()) {
self.verified_data_columns.push(data_column)?;
}
Expand All @@ -224,10 +245,29 @@ impl<E: EthSpec> PendingComponents<E> {
/// Inserts a new block and revalidates the existing blobs against it.
///
/// Blobs that don't match the new block's commitments are evicted.
pub fn merge_block(&mut self, block: DietAvailabilityPendingExecutedBlock<E>) {
/// NOTE: It is important that blobs are evicted instead of returning an error on a mismatch,
/// so invalid blobs don't prevent valid blocks from getting inserted.
/// Inserts `block` into the cache, then re-merges the already-cached
/// sidecars (blobs or custody data columns, selected by
/// `block_import_requirement`) so they are re-validated against the newly
/// inserted block; mismatching sidecars are evicted rather than erroring.
pub fn merge_block(
&mut self,
block: DietAvailabilityPendingExecutedBlock<E>,
block_import_requirement: &BlockImportRequirement,
spec: &ChainSpec,
) -> Result<(), AvailabilityCheckError> {
self.insert_block(block);
// NOTE(review): this take/merge pair is repeated inside the `AllBlobs`
// arm below — this looks like diff-rendering residue (the pre-PeerDAS
// lines that the PR replaced); confirm against the actual source file.
let reinsert = std::mem::take(self.get_cached_blobs_mut());
self.merge_blobs(reinsert);
match block_import_requirement {
BlockImportRequirement::AllBlobs => {
// Pull the cached blobs out and merge them back so they are
// re-checked against the inserted block's commitments.
let reinsert = std::mem::take(self.get_cached_blobs_mut());
self.merge_blobs(reinsert);
}
BlockImportRequirement::CustodyColumns(_) => {
// Swap the cached columns for an empty list sized from
// `spec.number_of_columns`, then re-merge them; `merge_data_columns`
// drops columns whose commitments mismatch the block.
let reinsert = std::mem::replace(
self.get_cached_data_columns_mut(),
RuntimeVariableList::empty(spec.number_of_columns),
);
self.merge_data_columns(reinsert)?;
}
}
Ok(())
}

/// Checks if the block and all of its expected blobs or custody columns (post-PeerDAS) are
Expand Down Expand Up @@ -843,7 +883,13 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
let epoch = pending_components
.epoch()
.ok_or(AvailabilityCheckError::UnableToDetermineImportRequirement)?;
self.block_import_requirement_from_epoch(epoch)
}

fn block_import_requirement_from_epoch(
&self,
epoch: Epoch,
) -> Result<BlockImportRequirement, AvailabilityCheckError> {
let peer_das_enabled = self.spec.is_peer_das_enabled_for_epoch(epoch);
if peer_das_enabled {
Ok(BlockImportRequirement::CustodyColumns(
Expand Down Expand Up @@ -1033,11 +1079,16 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
.pop_pending_components(block_root, &self.overflow_store)?
.unwrap_or_else(|| PendingComponents::empty(block_root, &self.spec));

let block_import_requirement =
self.block_import_requirement_from_epoch(diet_executed_block.epoch())?;
// Merge in the block.
pending_components.merge_block(diet_executed_block);
pending_components.merge_block(
diet_executed_block,
&block_import_requirement,
&self.spec,
)?;

// Check if we have all components and entire set is consistent.
let block_import_requirement = self.block_import_requirement(&pending_components)?;
if pending_components.is_available(&block_import_requirement, &self.log) {
write_lock.put_pending_components(
block_root,
Expand Down Expand Up @@ -2407,7 +2458,9 @@ mod pending_components_tests {
let block_root = Hash256::zero();
let spec = E::default_spec();
let mut cache = <PendingComponents<E>>::empty(block_root, &spec);
cache.merge_block(block_commitments);
cache
.merge_block(block_commitments, &BlockImportRequirement::AllBlobs, &spec)
.unwrap();
cache.merge_blobs(random_blobs);
cache.merge_blobs(blobs);

Expand All @@ -2423,7 +2476,9 @@ mod pending_components_tests {
let spec = E::default_spec();
let mut cache = <PendingComponents<E>>::empty(block_root, &spec);
cache.merge_blobs(random_blobs);
cache.merge_block(block_commitments);
cache
.merge_block(block_commitments, &BlockImportRequirement::AllBlobs, &spec)
.unwrap();
cache.merge_blobs(blobs);

assert_cache_consistent(cache);
Expand All @@ -2440,7 +2495,9 @@ mod pending_components_tests {
let mut cache = <PendingComponents<E>>::empty(block_root, &spec);
cache.merge_blobs(random_blobs);
cache.merge_blobs(blobs);
cache.merge_block(block_commitments);
cache
.merge_block(block_commitments, &BlockImportRequirement::AllBlobs, &spec)
.unwrap();

assert_empty_blob_cache(cache);
}
Expand All @@ -2454,7 +2511,9 @@ mod pending_components_tests {
let block_root = Hash256::zero();
let spec = E::default_spec();
let mut cache = <PendingComponents<E>>::empty(block_root, &spec);
cache.merge_block(block_commitments);
cache
.merge_block(block_commitments, &BlockImportRequirement::AllBlobs, &spec)
.unwrap();
cache.merge_blobs(blobs);
cache.merge_blobs(random_blobs);

Expand All @@ -2471,7 +2530,9 @@ mod pending_components_tests {
let spec = E::default_spec();
let mut cache = <PendingComponents<E>>::empty(block_root, &spec);
cache.merge_blobs(blobs);
cache.merge_block(block_commitments);
cache
.merge_block(block_commitments, &BlockImportRequirement::AllBlobs, &spec)
.unwrap();
cache.merge_blobs(random_blobs);

assert_cache_consistent(cache);
Expand All @@ -2488,7 +2549,9 @@ mod pending_components_tests {
let mut cache = <PendingComponents<E>>::empty(block_root, &spec);
cache.merge_blobs(blobs);
cache.merge_blobs(random_blobs);
cache.merge_block(block_commitments);
cache
.merge_block(block_commitments, &BlockImportRequirement::AllBlobs, &spec)
.unwrap();

assert_cache_consistent(cache);
}
Expand Down
Loading