Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

blockchain: Implement header proof storage. #2938

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
32 changes: 24 additions & 8 deletions blockchain/chain.go
Original file line number Diff line number Diff line change
Expand Up @@ -677,14 +677,18 @@ func (b *BlockChain) connectBlock(node *blockNode, block, parent *dcrutil.Block,
return err
}

// NOTE: When more header commitments are added, the inclusion proofs
// will need to be generated and stored to the database here (when not
// already stored). There is no need to store them currently because
// there is only a single commitment which means there are no sibling
// hashes that typically form the inclusion proofs due to the fact a
// single leaf merkle tree reduces to having the same root as the leaf
// and therefore the proof only consists of checking the leaf hash
// itself against the commitment root.
// Determine the individual commitment hashes that comprise the leaves of
// the header commitment merkle tree depending on the active agendas. These
// are stored in the database below so that inclusion proofs can be
// generated for each commitment.
var hdrCommitmentLeaves []chainhash.Hash
hdrCommitmentsActive, err := b.isHeaderCommitmentsAgendaActive(node.parent)
if err != nil {
return err
}
if hdrCommitmentsActive {
hdrCommitmentLeaves = hdrCommitments.v1Leaves()
}

// Generate a new best state snapshot that will be used to update the
// database and later memory if all database updates are successful.
Expand Down Expand Up @@ -741,6 +745,13 @@ func (b *BlockChain) connectBlock(node *blockNode, block, parent *dcrutil.Block,
return err
}

// Store the leaf hashes of the header commitment merkle tree in the
// database. Nothing is written when there aren't any.
err = dbPutHeaderCommitments(dbTx, block.Hash(), hdrCommitmentLeaves)
if err != nil {
return err
}

return nil
})
if err != nil {
Expand Down Expand Up @@ -923,6 +934,10 @@ func (b *BlockChain) disconnectBlock(node *blockNode, block, parent *dcrutil.Blo
// NOTE: The GCS filter is intentionally not removed on disconnect to
// ensure that lightweight clients still have access to them if they
// happen to be on a side chain after coming back online after a reorg.
//
// Similarly, the commitment hashes needed to generate the associated
// inclusion proof for the header commitment are not removed for the
// same reason.

return nil
})
Expand Down Expand Up @@ -1248,6 +1263,7 @@ func (b *BlockChain) reorganizeChainInternal(target *blockNode) error {
return err
}
hdrCommitments.filter = filter
hdrCommitments.filterHash = filter.Hash()
} else {
// The block must pass all of the validation rules which depend on
// having the full block data for all of its ancestors available.
Expand Down
132 changes: 127 additions & 5 deletions blockchain/chainio.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ import (

const (
// currentDatabaseVersion indicates the current database version.
currentDatabaseVersion = 12
currentDatabaseVersion = 13

// currentBlockIndexVersion indicates the current block index database
// version.
Expand Down Expand Up @@ -96,6 +96,11 @@ var (
// filters.
gcsFilterBucketName = []byte("gcsfilters")

// headerCmtsBucketName is the name of the db bucket used to house header
// commitment journal entries which consist of the hashes that the
// commitment root field of blocks commit to.
headerCmtsBucketName = []byte("hdrcmts")

// treasuryBucketName is the name of the db bucket that is used to house
// TADD/TSPEND additions and subtractions from the treasury account.
treasuryBucketName = []byte("treasury")
Expand Down Expand Up @@ -826,8 +831,7 @@ func dbRemoveSpendJournalEntry(dbTx database.Tx, blockHash *chainhash.Hash) erro
//
// -----------------------------------------------------------------------------

// dbFetchGCSFilter fetches the GCS filter for the passed block and deserializes
// it into a slice of spent txout entries.
// dbFetchGCSFilter fetches the version 2 GCS filter for the passed block.
//
// When there is no entry for the provided hash, nil will be returned for both
// the filter and the error.
Expand All @@ -847,14 +851,126 @@ func dbFetchGCSFilter(dbTx database.Tx, blockHash *chainhash.Hash) (*gcs.FilterV
return filter, nil
}

// dbPutGCSFilter uses an existing database transaction to update the GCS filter
// for the given block hash using the provided filter.
// dbPutGCSFilter uses an existing database transaction to update the version 2
// GCS filter for the given block hash using the provided filter.
func dbPutGCSFilter(dbTx database.Tx, blockHash *chainhash.Hash, filter *gcs.FilterV2) error {
filterBucket := dbTx.Metadata().Bucket(gcsFilterBucketName)
serialized := filter.Bytes()
return filterBucket.Put(blockHash[:], serialized)
}

// -----------------------------------------------------------------------------
// The header commitments journal consists of an entry for each block connected
// to the main chain (or has ever been connected to it) that contains each of
// the individual commitments covered by the commitment root field of the header
// of that block.
//
// Note that there will also not be an entry for blocks that do not commit to
// anything such as those prior to the activation of the header commitments
// agenda on networks where it is not always active.
//
// The serialized key format is:
//
// <block hash>
//
// Field Type Size
// block hash chainhash.Hash chainhash.HashSize
//
// The serialized value format is:
//
// <num commitment hashes><commitment hashes>
//
// Field Type Size
// num commitment hashes VLQ variable
// commitment hashes
// commitment hash chainhash.Hash chainhash.HashSize
//
// -----------------------------------------------------------------------------

// serializeHeaderCommitments serializes the passed commitment hashes into a
// single byte slice according to the format described in detail above.
func serializeHeaderCommitments(commitments []chainhash.Hash) []byte {
	// An empty or nil slice serializes to no bytes at all.
	numCommitments := uint64(len(commitments))
	if numCommitments == 0 {
		return nil
	}

	// Allocate exactly enough space for the commitment count prefix followed
	// by every commitment hash.
	target := make([]byte, serializeSizeVLQ(numCommitments)+
		len(commitments)*chainhash.HashSize)

	// Write the count followed by each hash in order.
	idx := putVLQ(target, numCommitments)
	for _, hash := range commitments {
		idx += copy(target[idx:], hash[:])
	}
	return target
}

// deserializeHeaderCommitments decodes the passed serialized byte slice into a
// slice of commitment hashes according to the format described in detail above.
func deserializeHeaderCommitments(serialized []byte) ([]chainhash.Hash, error) {
	// A zero-length value decodes to no commitments.
	if len(serialized) == 0 {
		return nil, nil
	}

	// Read the commitment count and ensure at least one more byte follows it.
	numCommitments, offset := deserializeVLQ(serialized)
	if offset >= len(serialized) {
		str := "unexpected end of data after num commitments"
		return nil, makeDbErr(database.ErrCorruption, str)
	}

	// Verify the remaining data is large enough to hold the claimed number of
	// commitment hashes.
	remaining := len(serialized[offset:])
	required := int(numCommitments) * chainhash.HashSize
	if remaining < required {
		str := fmt.Sprintf("unexpected end of data after number of commitments "+
			"(got %v, need %v)", remaining, required)
		return nil, makeDbErr(database.ErrCorruption, str)
	}

	// Copy each hash out of the serialized data.
	commitments := make([]chainhash.Hash, numCommitments)
	for i := range commitments {
		copy(commitments[i][:], serialized[offset:offset+chainhash.HashSize])
		offset += chainhash.HashSize
	}
	return commitments, nil
}

// dbFetchHeaderCommitments fetches the hashes that the commitment root field of
// the header commits to for the passed block.
//
// When there is no entry for the provided block hash, nil will be returned for
// both the commitment hashes and the error.
func dbFetchHeaderCommitments(dbTx database.Tx, blockHash *chainhash.Hash) ([]chainhash.Hash, error) {
	// A missing entry yields a nil value which deserializes to nil, nil.
	bucket := dbTx.Metadata().Bucket(headerCmtsBucketName)
	return deserializeHeaderCommitments(bucket.Get(blockHash[:]))
}

// dbPutHeaderCommitments uses an existing database transaction to update the
// hashes that the commitment root field of the header commits to for the passed
// block.
//
// No database entry will be created when the provided commitments slice is nil
// or empty (aka zero length).
func dbPutHeaderCommitments(dbTx database.Tx, blockHash *chainhash.Hash, commitments []chainhash.Hash) error {
	// Serializing zero commitments yields no bytes, in which case there is
	// nothing to store.
	data := serializeHeaderCommitments(commitments)
	if len(data) == 0 {
		return nil
	}

	bucket := dbTx.Metadata().Bucket(headerCmtsBucketName)
	return bucket.Put(blockHash[:], data)
}

// -----------------------------------------------------------------------------
// The database information contains information about the version and date
// of the blockchain database.
Expand Down Expand Up @@ -1234,6 +1350,12 @@ func (b *BlockChain) createChainState() error {
return err
}
_, err = meta.CreateBucket(treasuryTSpendBucketName)
if err != nil {
return err
}

// Create the bucket that houses the header commitments.
_, err = meta.CreateBucket(headerCmtsBucketName)
return err
})
return err
Expand Down
95 changes: 95 additions & 0 deletions blockchain/chainio_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -792,6 +792,101 @@ func TestSpendJournalErrors(t *testing.T) {
}
}

// TestHeaderCommitmentSerialization ensures serializing and deserializing
// header commitment journal entries works as expected.
func TestHeaderCommitmentSerialization(t *testing.T) {
	t.Parallel()

	cmtOneHash := *mustParseHash("0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20")
	cmtTwoHash := *mustParseHash("02030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f2021")
	tests := []struct {
		name        string
		commitments []chainhash.Hash
		serialized  []byte
	}{{
		name:        "no commitments",
		commitments: nil,
		serialized:  nil,
	}, {
		name:        "one commitment",
		commitments: []chainhash.Hash{cmtOneHash},
		serialized: hexToBytes("01" +
			"201f1e1d1c1b1a191817161514131211100f0e0d0c0b0a090807060504030201"),
	}, {
		// Fixed typo in the test name ("commitmentments" -> "commitments").
		name:        "two commitments",
		commitments: []chainhash.Hash{cmtOneHash, cmtTwoHash},
		serialized: hexToBytes("02" +
			"201f1e1d1c1b1a191817161514131211100f0e0d0c0b0a090807060504030201" +
			"21201f1e1d1c1b1a191817161514131211100f0e0d0c0b0a0908070605040302"),
	}}

	for _, test := range tests {
		// Ensure the commitments serialize to the expected value.
		gotBytes := serializeHeaderCommitments(test.commitments)
		if !bytes.Equal(gotBytes, test.serialized) {
			t.Errorf("%q: mismatched bytes - got %x, want %x", test.name,
				gotBytes, test.serialized)
			continue
		}

		// Ensure the serialized bytes are decoded back to the expected
		// commitments.
		commitments, err := deserializeHeaderCommitments(test.serialized)
		if err != nil {
			t.Errorf("%q: unexpected error: %v", test.name, err)
			continue
		}
		if !reflect.DeepEqual(commitments, test.commitments) {
			t.Errorf("%q: mismatched commitments - got %v, want %v", test.name,
				commitments, test.commitments)
			continue
		}
	}
}

// TestHeaderCommitmentDeserializeErrors performs negative tests against
// deserializing header commitment journal entries to ensure error paths work as
// expected.
func TestHeaderCommitmentDeserializeErrors(t *testing.T) {
	t.Parallel()

	// Each entry provides malformed serialized data along with the error that
	// deserializeHeaderCommitments is expected to return for it.
	tests := []struct {
		name       string
		serialized []byte
		err        error
	}{{
		// A VLQ with the continuation bit set but no following byte.
		name:       "short data in number of commitments",
		serialized: hexToBytes("80"),
		err:        database.ErrCorruption,
	}, {
		// Claims one commitment but provides only 31 of the 32 hash bytes.
		name: "short data in commitment hashes",
		serialized: hexToBytes("01" +
			"201f1e1d1c1b1a191817161514131211100f0e0d0c0b0a0908070605040302"),
		err: database.ErrCorruption,
	}, {
		// Claims two commitments but provides only one full hash.
		name: "short data in commitment hashes 2 begin",
		serialized: hexToBytes("02" +
			"201f1e1d1c1b1a191817161514131211100f0e0d0c0b0a090807060504030201"),
		err: database.ErrCorruption,
	}, {
		// Claims two commitments but the second hash is one byte short.
		name: "short data in commitment hashes 2 end",
		serialized: hexToBytes("02" +
			"201f1e1d1c1b1a191817161514131211100f0e0d0c0b0a090807060504030201" +
			"21201f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403"),
		err: database.ErrCorruption,
	}}

	for _, test := range tests {
		// Ensure the expected error type and code is returned.
		_, err := deserializeHeaderCommitments(test.serialized)
		if !errors.Is(err, test.err) {
			t.Errorf("%q: wrong error -- got: %v, want: %v", test.name, err,
				test.err)
			continue
		}
	}
}

// TestBestChainStateSerialization ensures serializing and deserializing the
// best chain state works as expected.
func TestBestChainStateSerialization(t *testing.T) {
Expand Down
2 changes: 1 addition & 1 deletion blockchain/go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ require (
github.com/decred/dcrd/blockchain/standalone/v2 v2.1.0
github.com/decred/dcrd/chaincfg/chainhash v1.0.3
github.com/decred/dcrd/chaincfg/v3 v3.1.1
github.com/decred/dcrd/crypto/blake256 v1.0.0
github.com/decred/dcrd/database/v3 v3.0.0
github.com/decred/dcrd/dcrec v1.0.0
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1
Expand All @@ -23,7 +24,6 @@ require (
github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 // indirect
github.com/dchest/siphash v1.2.2 // indirect
github.com/decred/base58 v1.0.3 // indirect
github.com/decred/dcrd/crypto/blake256 v1.0.0 // indirect
github.com/decred/dcrd/crypto/ripemd160 v1.0.1 // indirect
github.com/decred/dcrd/dcrec/edwards/v2 v2.0.2 // indirect
github.com/golang/snappy v0.0.4 // indirect
Expand Down
Loading