diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index 694a3e51832b..ea79ab448423 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -260,7 +260,7 @@ func (b *SimulatedBackend) ForEachStorageAt(ctx context.Context, contract common
// TransactionReceipt returns the receipt of a transaction.
func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {
- receipt, _, _, _ := core.GetReceipt(b.database, txHash)
+ receipt, _, _, _ := rawdb.ReadReceipt(b.database, txHash, b.config)
return receipt, nil
}
diff --git a/cmd/XDC/dao_test.go b/cmd/XDC/dao_test.go
index 5444a4b13761..f95c39ed4a63 100644
--- a/cmd/XDC/dao_test.go
+++ b/cmd/XDC/dao_test.go
@@ -22,10 +22,8 @@ import (
"path/filepath"
"testing"
- "github.com/XinFinOrg/XDPoSChain/core/rawdb"
-
"github.com/XinFinOrg/XDPoSChain/common"
- "github.com/XinFinOrg/XDPoSChain/core"
+ "github.com/XinFinOrg/XDPoSChain/core/rawdb"
)
// Genesis block for nodes which don't care about the DAO fork (i.e. not configured)
@@ -129,7 +127,7 @@ func testDAOForkBlockNewChain(t *testing.T, test int, genesis string, expectBloc
if genesis != "" {
genesisHash = daoGenesisHash
}
- config, err := core.GetChainConfig(db, genesisHash)
+ config, err := rawdb.ReadChainConfig(db, genesisHash)
if err != nil {
t.Errorf("test %d: failed to retrieve chain config: %v", test, err)
return // we want to return here, the other checks can't make it past this point (nil panic).
diff --git a/cmd/gc/main.go b/cmd/gc/main.go
index 4d1ba3a14137..ce2a779bc595 100644
--- a/cmd/gc/main.go
+++ b/cmd/gc/main.go
@@ -13,7 +13,6 @@ import (
"github.com/XinFinOrg/XDPoSChain/cmd/utils"
"github.com/XinFinOrg/XDPoSChain/common"
"github.com/XinFinOrg/XDPoSChain/common/lru"
- "github.com/XinFinOrg/XDPoSChain/core"
"github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/state"
"github.com/XinFinOrg/XDPoSChain/eth/ethconfig"
@@ -54,15 +53,15 @@ func main() {
flag.Parse()
db, _ := leveldb.New(*dir, ethconfig.Defaults.DatabaseCache, utils.MakeDatabaseHandles(0), "")
lddb := rawdb.NewDatabase(db)
- head := core.GetHeadBlockHash(lddb)
- currentHeader := core.GetHeader(lddb, head, core.GetBlockNumber(lddb, head))
+ head := rawdb.ReadHeadBlockHash(lddb)
+ currentHeader := rawdb.ReadHeader(lddb, head, *rawdb.ReadHeaderNumber(lddb, head))
tridb := trie.NewDatabase(lddb)
catchEventInterupt(db)
cache = lru.NewCache[common.Hash, struct{}](*cacheSize)
go func() {
for i := uint64(1); i <= currentHeader.Number.Uint64(); i++ {
- hash := core.GetCanonicalHash(lddb, i)
- root := core.GetHeader(lddb, hash, i).Root
+ hash := rawdb.ReadCanonicalHash(lddb, i)
+ root := rawdb.ReadHeader(lddb, hash, i).Root
trieRoot, err := trie.NewSecure(root, tridb)
if err != nil {
continue
diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go
index 0e736b95e0b3..8b974f5ae443 100644
--- a/cmd/utils/cmd.go
+++ b/cmd/utils/cmd.go
@@ -30,6 +30,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/common"
"github.com/XinFinOrg/XDPoSChain/core"
+ "github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/ethdb"
@@ -272,15 +273,13 @@ func ImportPreimages(db ethdb.Database, fn string) error {
// Accumulate the preimages and flush when enough ws gathered
preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob)
if len(preimages) > 1024 {
- if err := core.WritePreimages(db, 0, preimages); err != nil {
- return err
- }
+ rawdb.WritePreimages(db, preimages)
preimages = make(map[common.Hash][]byte)
}
}
// Flush the last batch preimage data
if len(preimages) > 0 {
- return core.WritePreimages(db, 0, preimages)
+ rawdb.WritePreimages(db, preimages)
}
return nil
}
diff --git a/contracts/utils.go b/contracts/utils.go
index b309e5bdd24e..cd007afad6d7 100644
--- a/contracts/utils.go
+++ b/contracts/utils.go
@@ -39,7 +39,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/consensus/XDPoS/utils"
"github.com/XinFinOrg/XDPoSChain/contracts/blocksigner/contract"
randomizeContract "github.com/XinFinOrg/XDPoSChain/contracts/randomize/contract"
- "github.com/XinFinOrg/XDPoSChain/core"
+ "github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/state"
"github.com/XinFinOrg/XDPoSChain/core/txpool"
"github.com/XinFinOrg/XDPoSChain/core/types"
@@ -337,7 +337,7 @@ func GetRewardForCheckpoint(c *XDPoS.XDPoS, chain consensus.ChainReader, header
block := chain.GetBlock(header.Hash(), i)
txs := block.Transactions()
if !chain.Config().IsTIPSigning(header.Number) {
- receipts := core.GetBlockReceipts(c.GetDb(), header.Hash(), i)
+ receipts := rawdb.ReadRawReceipts(c.GetDb(), header.Hash(), i)
signingTxs = c.CacheNoneTIPSigningTxs(header, txs, receipts)
} else {
signingTxs = c.CacheSigningTxs(header.Hash(), txs)
diff --git a/core/bench_test.go b/core/bench_test.go
index 0d797dbea5a4..d8994af3459a 100644
--- a/core/bench_test.go
+++ b/core/bench_test.go
@@ -238,12 +238,12 @@ func makeChainForBench(db ethdb.Database, full bool, count uint64) {
rawdb.WriteHeader(db, header)
rawdb.WriteCanonicalHash(db, hash, n)
- WriteTd(db, hash, n, big.NewInt(int64(n+1)))
+ rawdb.WriteTd(db, hash, n, big.NewInt(int64(n+1)))
if full || n == 0 {
block := types.NewBlockWithHeader(header)
rawdb.WriteBody(db, hash, n, block.Body())
- WriteBlockReceipts(db, hash, n, nil)
+ rawdb.WriteReceipts(db, hash, n, nil)
}
}
}
@@ -295,8 +295,8 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
header := chain.GetHeaderByNumber(n)
if full {
hash := header.Hash()
- GetBody(db, hash, n)
- GetBlockReceipts(db, hash, n)
+ rawdb.ReadBody(db, hash, n)
+ rawdb.ReadReceipts(db, hash, n, chain.Config())
}
}
diff --git a/core/blockchain.go b/core/blockchain.go
index 66b6fdcddc67..7f7422b16648 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -306,7 +306,7 @@ func (bc *BlockChain) addXDCxDb(XDCxDb ethdb.XDCxDatabase) {
// assumes that the chain manager mutex is held.
func (bc *BlockChain) loadLastState() error {
// Restore the last known head block
- head := GetHeadBlockHash(bc.db)
+ head := rawdb.ReadHeadBlockHash(bc.db)
if head == (common.Hash{}) {
// Corrupt or empty database, init from scratch
log.Warn("Empty database, resetting chain")
@@ -375,7 +375,7 @@ func (bc *BlockChain) loadLastState() error {
// Restore the last known head header
currentHeader := currentBlock.Header()
- if head := GetHeadHeaderHash(bc.db); head != (common.Hash{}) {
+ if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
if header := bc.GetHeaderByHash(head); header != nil {
currentHeader = header
}
@@ -393,7 +393,7 @@ func (bc *BlockChain) loadLastState() error {
bc.currentFastBlock.Store(currentBlock)
headFastBlockGauge.Update(int64(currentBlock.NumberU64()))
- if head := GetHeadFastBlockHash(bc.db); head != (common.Hash{}) {
+ if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
if block := bc.GetBlockByHash(head); block != nil {
bc.currentFastBlock.Store(block)
headFastBlockGauge.Update(int64(block.NumberU64()))
@@ -795,7 +795,11 @@ func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
if cached, ok := bc.bodyCache.Get(hash); ok {
return cached
}
- body := GetBody(bc.db, hash, bc.hc.GetBlockNumber(hash))
+ number := bc.hc.GetBlockNumber(hash)
+ if number == nil {
+ return nil
+ }
+ body := rawdb.ReadBody(bc.db, hash, *number)
if body == nil {
return nil
}
@@ -811,7 +815,11 @@ func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
if cached, ok := bc.bodyRLPCache.Get(hash); ok {
return cached
}
- body := GetBodyRLP(bc.db, hash, bc.hc.GetBlockNumber(hash))
+ number := bc.hc.GetBlockNumber(hash)
+ if number == nil {
+ return nil
+ }
+ body := rawdb.ReadBodyRLP(bc.db, hash, *number)
if len(body) == 0 {
return nil
}
@@ -825,8 +833,10 @@ func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
if bc.blockCache.Contains(hash) {
return true
}
- ok, _ := bc.db.Has(blockBodyKey(hash, number))
- return ok
+ if !bc.HasHeader(hash, number) {
+ return false
+ }
+ return rawdb.HasBody(bc.db, hash, number)
}
// HasFullState checks if state trie is fully present in the database or not.
@@ -868,7 +878,7 @@ func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
if block, ok := bc.blockCache.Get(hash); ok {
return block
}
- block := GetBlock(bc.db, hash, number)
+ block := rawdb.ReadBlock(bc.db, hash, number)
if block == nil {
return nil
}
@@ -879,13 +889,13 @@ func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
// GetBlockByHash retrieves a block from the database by hash, caching it if found.
func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
- return bc.GetBlock(hash, bc.hc.GetBlockNumber(hash))
+ return bc.GetBlock(hash, *bc.hc.GetBlockNumber(hash))
}
// GetBlockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found.
func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
- hash := GetCanonicalHash(bc.db, number)
+ hash := rawdb.ReadCanonicalHash(bc.db, number)
if hash == (common.Hash{}) {
return nil
}
@@ -913,14 +923,17 @@ func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
// [deprecated by eth/62]
func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
number := bc.hc.GetBlockNumber(hash)
+ if number == nil {
+ return nil
+ }
for i := 0; i < n; i++ {
- block := bc.GetBlock(hash, number)
+ block := bc.GetBlock(hash, *number)
if block == nil {
break
}
blocks = append(blocks, block)
hash = block.ParentHash()
- number--
+ *number--
}
return
}
@@ -1205,12 +1218,8 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
}
// Write all the data out into the database
rawdb.WriteBody(batch, blockHash, blockNumber, block.Body())
- if err := WriteBlockReceipts(batch, blockHash, blockNumber, receipts); err != nil {
- return i, fmt.Errorf("failed to write block receipts: %v", err)
- }
- if err := WriteTxLookupEntries(batch, block); err != nil {
- return i, fmt.Errorf("failed to write lookup metadata: %v", err)
- }
+ rawdb.WriteReceipts(batch, blockHash, blockNumber, receipts)
+ rawdb.WriteTxLookupEntriesByBlock(batch, block)
// Write everything belongs to the blocks into the database. So that
// we can ensure all components of body is completed(body, receipts,
@@ -1242,9 +1251,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
currentFastBlock := bc.CurrentFastBlock()
if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
- if err := WriteHeadFastBlockHash(bc.db, head.Hash()); err != nil {
- log.Crit("Failed to update head fast block hash", "err", err)
- }
+ rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
bc.currentFastBlock.Store(head)
headFastBlockGauge.Update(int64(head.NumberU64()))
}
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 31ba9bb4a701..eb26fca2b840 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -129,7 +129,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
return err
}
blockchain.chainmu.MustLock()
- WriteTd(blockchain.db, block.Hash(), block.NumberU64(), new(big.Int).Add(block.Difficulty(), blockchain.GetTdByHash(block.ParentHash())))
+ rawdb.WriteTd(blockchain.db, block.Hash(), block.NumberU64(), new(big.Int).Add(block.Difficulty(), blockchain.GetTdByHash(block.ParentHash())))
rawdb.WriteBlock(blockchain.db, block)
statedb.Commit(true)
blockchain.chainmu.Unlock()
@@ -147,7 +147,7 @@ func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error
}
// Manually insert the header into the database, but don't reorganise (allows subsequent testing)
blockchain.chainmu.MustLock()
- WriteTd(blockchain.db, header.Hash(), header.Number.Uint64(), new(big.Int).Add(header.Difficulty, blockchain.GetTdByHash(header.ParentHash)))
+ rawdb.WriteTd(blockchain.db, header.Hash(), header.Number.Uint64(), new(big.Int).Add(header.Difficulty, blockchain.GetTdByHash(header.ParentHash)))
rawdb.WriteHeader(blockchain.db, header)
blockchain.chainmu.Unlock()
}
@@ -174,7 +174,7 @@ func TestLastBlock(t *testing.T) {
if _, err := blockchain.InsertChain(blocks); err != nil {
t.Fatalf("Failed to insert block: %v", err)
}
- if blocks[len(blocks)-1].Hash() != GetHeadBlockHash(blockchain.db) {
+ if blocks[len(blocks)-1].Hash() != rawdb.ReadHeadBlockHash(blockchain.db) {
t.Fatalf("Write/Get HeadBlockHash failed")
}
}
@@ -624,13 +624,13 @@ func TestFastVsFullChains(t *testing.T) {
} else if types.CalcUncleHash(fblock.Uncles()) != types.CalcUncleHash(ablock.Uncles()) {
t.Errorf("block #%d [%x]: uncles mismatch: have %v, want %v", num, hash, fblock.Uncles(), ablock.Uncles())
}
- if freceipts, areceipts := GetBlockReceipts(fastDb, hash, GetBlockNumber(fastDb, hash)), GetBlockReceipts(archiveDb, hash, GetBlockNumber(archiveDb, hash)); types.DeriveSha(freceipts) != types.DeriveSha(areceipts) {
+ if freceipts, areceipts := rawdb.ReadReceipts(fastDb, hash, *rawdb.ReadHeaderNumber(fastDb, hash), fast.Config()), rawdb.ReadReceipts(archiveDb, hash, *rawdb.ReadHeaderNumber(archiveDb, hash), fast.Config()); types.DeriveSha(freceipts) != types.DeriveSha(areceipts) {
t.Errorf("block #%d [%x]: receipts mismatch: have %v, want %v", num, hash, freceipts, areceipts)
}
}
// Check that the canonical chains are the same between the databases
for i := 0; i < len(blocks)+1; i++ {
- if fhash, ahash := GetCanonicalHash(fastDb, uint64(i)), GetCanonicalHash(archiveDb, uint64(i)); fhash != ahash {
+ if fhash, ahash := rawdb.ReadCanonicalHash(fastDb, uint64(i)), rawdb.ReadCanonicalHash(archiveDb, uint64(i)); fhash != ahash {
t.Errorf("block #%d: canonical hash mismatch: have %v, want %v", i, fhash, ahash)
}
}
@@ -810,28 +810,28 @@ func TestChainTxReorgs(t *testing.T) {
// removed tx
for i, tx := range (types.Transactions{pastDrop, freshDrop}) {
- if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn != nil {
+ if txn, _, _, _ := rawdb.ReadTransaction(db, tx.Hash()); txn != nil {
t.Errorf("drop %d: tx %v found while shouldn't have been", i, txn)
}
- if rcpt, _, _, _ := GetReceipt(db, tx.Hash()); rcpt != nil {
+ if rcpt, _, _, _ := rawdb.ReadReceipt(db, tx.Hash(), blockchain.Config()); rcpt != nil {
t.Errorf("drop %d: receipt %v found while shouldn't have been", i, rcpt)
}
}
// added tx
for i, tx := range (types.Transactions{pastAdd, freshAdd, futureAdd}) {
- if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn == nil {
+ if txn, _, _, _ := rawdb.ReadTransaction(db, tx.Hash()); txn == nil {
t.Errorf("add %d: expected tx to be found", i)
}
- if rcpt, _, _, _ := GetReceipt(db, tx.Hash()); rcpt == nil {
+ if rcpt, _, _, _ := rawdb.ReadReceipt(db, tx.Hash(), blockchain.Config()); rcpt == nil {
t.Errorf("add %d: expected receipt to be found", i)
}
}
// shared tx
for i, tx := range (types.Transactions{postponed, swapped}) {
- if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn == nil {
+ if txn, _, _, _ := rawdb.ReadTransaction(db, tx.Hash()); txn == nil {
t.Errorf("share %d: expected tx to be found", i)
}
- if rcpt, _, _, _ := GetReceipt(db, tx.Hash()); rcpt == nil {
+ if rcpt, _, _, _ := rawdb.ReadReceipt(db, tx.Hash(), blockchain.Config()); rcpt == nil {
t.Errorf("share %d: expected receipt to be found", i)
}
}
@@ -986,7 +986,7 @@ func TestCanonicalBlockRetrieval(t *testing.T) {
// try to retrieve a block by its canonical hash and see if the block data can be retrieved.
for {
- ch := GetCanonicalHash(blockchain.db, block.NumberU64())
+ ch := rawdb.ReadCanonicalHash(blockchain.db, block.NumberU64())
if ch == (common.Hash{}) {
continue // busy wait for canonical hash to be written
}
@@ -994,7 +994,7 @@ func TestCanonicalBlockRetrieval(t *testing.T) {
t.Errorf("unknown canonical hash, want %s, got %s", block.Hash().Hex(), ch.Hex())
return
}
- fb := GetBlock(blockchain.db, ch, block.NumberU64())
+ fb := rawdb.ReadBlock(blockchain.db, ch, block.NumberU64())
if fb == nil {
t.Errorf("unable to retrieve block %d for canonical hash: %s", block.NumberU64(), ch.Hex())
return
diff --git a/core/chain_indexer.go b/core/chain_indexer.go
index 77cfac232bd7..d089bc2082bf 100644
--- a/core/chain_indexer.go
+++ b/core/chain_indexer.go
@@ -25,6 +25,7 @@ import (
"time"
"github.com/XinFinOrg/XDPoSChain/common"
+ "github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/ethdb"
"github.com/XinFinOrg/XDPoSChain/event"
@@ -207,7 +208,7 @@ func (c *ChainIndexer) eventLoop(currentHeader *types.Header, events chan ChainE
// TODO(karalabe): This operation is expensive and might block, causing the event system to
// potentially also lock up. We need to do with on a different thread somehow.
- if h := FindCommonAncestor(c.chainDb, prevHeader, header); h != nil {
+ if h := rawdb.FindCommonAncestor(c.chainDb, prevHeader, header); h != nil {
c.newHead(h.Number.Uint64(), true)
}
}
@@ -350,11 +351,11 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com
}
for number := section * c.sectionSize; number < (section+1)*c.sectionSize; number++ {
- hash := GetCanonicalHash(c.chainDb, number)
+ hash := rawdb.ReadCanonicalHash(c.chainDb, number)
if hash == (common.Hash{}) {
return common.Hash{}, fmt.Errorf("canonical block #%d unknown", number)
}
- header := GetHeader(c.chainDb, hash, number)
+ header := rawdb.ReadHeader(c.chainDb, hash, number)
if header == nil {
return common.Hash{}, fmt.Errorf("block #%d [%x..] not found", number, hash[:4])
} else if header.ParentHash != lastHead {
diff --git a/core/chain_indexer_test.go b/core/chain_indexer_test.go
index 4d44f3018a6f..d276a99a2aaa 100644
--- a/core/chain_indexer_test.go
+++ b/core/chain_indexer_test.go
@@ -93,7 +93,7 @@ func testChainIndexer(t *testing.T, count int) {
inject := func(number uint64) {
header := &types.Header{Number: big.NewInt(int64(number)), Extra: big.NewInt(rand.Int63()).Bytes()}
if number > 0 {
- header.ParentHash = GetCanonicalHash(db, number-1)
+ header.ParentHash = rawdb.ReadCanonicalHash(db, number-1)
}
rawdb.WriteHeader(db, header)
rawdb.WriteCanonicalHash(db, header.Hash(), number)
diff --git a/core/database_util.go b/core/database_util.go
deleted file mode 100644
index d25e06ac3a6f..000000000000
--- a/core/database_util.go
+++ /dev/null
@@ -1,587 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "bytes"
- "encoding/binary"
- "encoding/json"
- "errors"
- "fmt"
- "math/big"
-
- "github.com/XinFinOrg/XDPoSChain/common"
- "github.com/XinFinOrg/XDPoSChain/core/rawdb"
- "github.com/XinFinOrg/XDPoSChain/core/types"
- "github.com/XinFinOrg/XDPoSChain/ethdb"
- "github.com/XinFinOrg/XDPoSChain/log"
- "github.com/XinFinOrg/XDPoSChain/metrics"
- "github.com/XinFinOrg/XDPoSChain/params"
- "github.com/XinFinOrg/XDPoSChain/rlp"
-)
-
-// DatabaseReader wraps the Get method of a backing data store.
-type DatabaseReader interface {
- Get(key []byte) (value []byte, err error)
-}
-
-// DatabaseDeleter wraps the Delete method of a backing data store.
-type DatabaseDeleter interface {
- Delete(key []byte) error
-}
-
-var (
- headHeaderKey = []byte("LastHeader")
- headBlockKey = []byte("LastBlock")
- headFastKey = []byte("LastFast")
- trieSyncKey = []byte("TrieSync")
-
- // Data item prefixes (use single byte to avoid mixing data types, avoid `i`).
- headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
- tdSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + tdSuffix -> td
- numSuffix = []byte("n") // headerPrefix + num (uint64 big endian) + numSuffix -> hash
- blockHashPrefix = []byte("H") // blockHashPrefix + hash -> num (uint64 big endian)
- bodyPrefix = []byte("b") // bodyPrefix + num (uint64 big endian) + hash -> block body
- blockReceiptsPrefix = []byte("r") // blockReceiptsPrefix + num (uint64 big endian) + hash -> block receipts
- lookupPrefix = []byte("l") // lookupPrefix + hash -> transaction/receipt lookup metadata
- bloomBitsPrefix = []byte("B") // bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash -> bloom bits
-
- preimagePrefix = "secure-key-" // preimagePrefix + hash -> preimage
- configPrefix = []byte("ethereum-config-") // config prefix for the db
-
- // Chain index prefixes (use `i` + single byte to avoid mixing data types).
- BloomBitsIndexPrefix = []byte("iB") // BloomBitsIndexPrefix is the data table of a chain indexer to track its progress
-
- // used by old db, now only used for conversion
- oldReceiptsPrefix = []byte("receipts-")
- oldTxMetaSuffix = []byte{0x01}
-
- ErrChainConfigNotFound = errors.New("ChainConfig not found") // general config not found error
-
- preimageCounter = metrics.NewRegisteredCounter("db/preimage/total", nil)
- preimageHitCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil)
-)
-
-// TxLookupEntry is a positional metadata to help looking up the data content of
-// a transaction or receipt given only its hash.
-type TxLookupEntry struct {
- BlockHash common.Hash
- BlockIndex uint64
- Index uint64
-}
-
-// encodeBlockNumber encodes a block number as big endian uint64
-func encodeBlockNumber(number uint64) []byte {
- enc := make([]byte, 8)
- binary.BigEndian.PutUint64(enc, number)
- return enc
-}
-
-// GetCanonicalHash retrieves a hash assigned to a canonical block number.
-func GetCanonicalHash(db DatabaseReader, number uint64) common.Hash {
- data, _ := db.Get(append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...))
- if len(data) == 0 {
- return common.Hash{}
- }
- return common.BytesToHash(data)
-}
-
-// missingNumber is returned by GetBlockNumber if no header with the
-// given block hash has been stored in the database
-const missingNumber = uint64(0xffffffffffffffff)
-
-// GetBlockNumber returns the block number assigned to a block hash
-// if the corresponding header is present in the database
-func GetBlockNumber(db DatabaseReader, hash common.Hash) uint64 {
- data, _ := db.Get(append(blockHashPrefix, hash.Bytes()...))
- if len(data) != 8 {
- return missingNumber
- }
- return binary.BigEndian.Uint64(data)
-}
-
-// GetHeadHeaderHash retrieves the hash of the current canonical head block's
-// header. The difference between this and GetHeadBlockHash is that whereas the
-// last block hash is only updated upon a full block import, the last header
-// hash is updated already at header import, allowing head tracking for the
-// light synchronization mechanism.
-func GetHeadHeaderHash(db DatabaseReader) common.Hash {
- data, _ := db.Get(headHeaderKey)
- if len(data) == 0 {
- return common.Hash{}
- }
- return common.BytesToHash(data)
-}
-
-// GetHeadBlockHash retrieves the hash of the current canonical head block.
-func GetHeadBlockHash(db DatabaseReader) common.Hash {
- data, _ := db.Get(headBlockKey)
- if len(data) == 0 {
- return common.Hash{}
- }
- return common.BytesToHash(data)
-}
-
-// GetHeadFastBlockHash retrieves the hash of the current canonical head block during
-// fast synchronization. The difference between this and GetHeadBlockHash is that
-// whereas the last block hash is only updated upon a full block import, the last
-// fast hash is updated when importing pre-processed blocks.
-func GetHeadFastBlockHash(db DatabaseReader) common.Hash {
- data, _ := db.Get(headFastKey)
- if len(data) == 0 {
- return common.Hash{}
- }
- return common.BytesToHash(data)
-}
-
-// GetTrieSyncProgress retrieves the number of tries nodes fast synced to allow
-// reportinc correct numbers across restarts.
-func GetTrieSyncProgress(db DatabaseReader) uint64 {
- data, _ := db.Get(trieSyncKey)
- if len(data) == 0 {
- return 0
- }
- return new(big.Int).SetBytes(data).Uint64()
-}
-
-// GetHeaderRLP retrieves a block header in its raw RLP database encoding, or nil
-// if the header's not found.
-func GetHeaderRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValue {
- data, _ := db.Get(headerKey(hash, number))
- return data
-}
-
-// GetHeader retrieves the block header corresponding to the hash, nil if none
-// found.
-func GetHeader(db DatabaseReader, hash common.Hash, number uint64) *types.Header {
- data := GetHeaderRLP(db, hash, number)
- if len(data) == 0 {
- return nil
- }
- header := new(types.Header)
- if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
- log.Error("Invalid block header RLP", "hash", hash, "err", err)
- return nil
- }
- return header
-}
-
-// GetBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
-func GetBodyRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValue {
- data, _ := db.Get(blockBodyKey(hash, number))
- return data
-}
-
-func headerKey(hash common.Hash, number uint64) []byte {
- return append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
-}
-
-func blockBodyKey(hash common.Hash, number uint64) []byte {
- return append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
-}
-
-// GetBody retrieves the block body (transactons, uncles) corresponding to the
-// hash, nil if none found.
-func GetBody(db DatabaseReader, hash common.Hash, number uint64) *types.Body {
- data := GetBodyRLP(db, hash, number)
- if len(data) == 0 {
- return nil
- }
- body := new(types.Body)
- if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
- log.Error("Invalid block body RLP", "hash", hash, "err", err)
- return nil
- }
- return body
-}
-
-// GetTd retrieves a block's total difficulty corresponding to the hash, nil if
-// none found.
-func GetTd(db DatabaseReader, hash common.Hash, number uint64) *big.Int {
- data, _ := db.Get(append(append(append(headerPrefix, encodeBlockNumber(number)...), hash[:]...), tdSuffix...))
- if len(data) == 0 {
- return nil
- }
- td := new(big.Int)
- if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
- log.Error("Invalid block total difficulty RLP", "hash", hash, "err", err)
- return nil
- }
- return td
-}
-
-// GetBlock retrieves an entire block corresponding to the hash, assembling it
-// back from the stored header and body. If either the header or body could not
-// be retrieved nil is returned.
-//
-// Note, due to concurrent download of header and block body the header and thus
-// canonical hash can be stored in the database but the body data not (yet).
-func GetBlock(db DatabaseReader, hash common.Hash, number uint64) *types.Block {
- // Retrieve the block header and body contents
- header := GetHeader(db, hash, number)
- if header == nil {
- return nil
- }
- body := GetBody(db, hash, number)
- if body == nil {
- return nil
- }
- // Reassemble the block and return
- return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles)
-}
-
-// GetBlockReceipts retrieves the receipts generated by the transactions included
-// in a block given by its hash.
-func GetBlockReceipts(db DatabaseReader, hash common.Hash, number uint64) types.Receipts {
- data, _ := db.Get(append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash[:]...))
- if len(data) == 0 {
- return nil
- }
- storageReceipts := []*types.ReceiptForStorage{}
- if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
- log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
- return nil
- }
- receipts := make(types.Receipts, len(storageReceipts))
- for i, receipt := range storageReceipts {
- receipts[i] = (*types.Receipt)(receipt)
- receipts[i].BlockHash = hash
- receipts[i].BlockNumber = big.NewInt(0).SetUint64(number)
- receipts[i].TransactionIndex = uint(i)
- for _, log := range receipts[i].Logs {
- // set BlockHash to fix #650
- log.BlockHash = hash
- }
- }
- return receipts
-}
-
-// GetTxLookupEntry retrieves the positional metadata associated with a transaction
-// hash to allow retrieving the transaction or receipt by hash.
-func GetTxLookupEntry(db DatabaseReader, hash common.Hash) (common.Hash, uint64, uint64) {
- // Load the positional metadata from disk and bail if it fails
- data, _ := db.Get(append(lookupPrefix, hash.Bytes()...))
- if len(data) == 0 {
- return common.Hash{}, 0, 0
- }
- // Parse and return the contents of the lookup entry
- var entry TxLookupEntry
- if err := rlp.DecodeBytes(data, &entry); err != nil {
- log.Error("Invalid lookup entry RLP", "hash", hash, "err", err)
- return common.Hash{}, 0, 0
- }
- return entry.BlockHash, entry.BlockIndex, entry.Index
-}
-
-// GetTransaction retrieves a specific transaction from the database, along with
-// its added positional metadata.
-func GetTransaction(db DatabaseReader, hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) {
- // Retrieve the lookup metadata and resolve the transaction from the body
- blockHash, blockNumber, txIndex := GetTxLookupEntry(db, hash)
-
- if blockHash != (common.Hash{}) {
- body := GetBody(db, blockHash, blockNumber)
- if body == nil || len(body.Transactions) <= int(txIndex) {
- log.Error("Transaction referenced missing", "number", blockNumber, "hash", blockHash, "index", txIndex)
- return nil, common.Hash{}, 0, 0
- }
- return body.Transactions[txIndex], blockHash, blockNumber, txIndex
- }
- // Old transaction representation, load the transaction and it's metadata separately
- data, _ := db.Get(hash.Bytes())
- if len(data) == 0 {
- return nil, common.Hash{}, 0, 0
- }
- var tx types.Transaction
- if err := rlp.DecodeBytes(data, &tx); err != nil {
- return nil, common.Hash{}, 0, 0
- }
- // Retrieve the blockchain positional metadata
- data, _ = db.Get(append(hash.Bytes(), oldTxMetaSuffix...))
- if len(data) == 0 {
- return nil, common.Hash{}, 0, 0
- }
- var entry TxLookupEntry
- if err := rlp.DecodeBytes(data, &entry); err != nil {
- return nil, common.Hash{}, 0, 0
- }
- return &tx, entry.BlockHash, entry.BlockIndex, entry.Index
-}
-
-// GetReceipt retrieves a specific transaction receipt from the database, along with
-// its added positional metadata.
-func GetReceipt(db DatabaseReader, hash common.Hash) (*types.Receipt, common.Hash, uint64, uint64) {
- // Retrieve the lookup metadata and resolve the receipt from the receipts
- blockHash, blockNumber, receiptIndex := GetTxLookupEntry(db, hash)
-
- if blockHash != (common.Hash{}) {
- receipts := GetBlockReceipts(db, blockHash, blockNumber)
- if len(receipts) <= int(receiptIndex) {
- log.Error("Receipt refereced missing", "number", blockNumber, "hash", blockHash, "index", receiptIndex)
- return nil, common.Hash{}, 0, 0
- }
- return receipts[receiptIndex], blockHash, blockNumber, receiptIndex
- }
- // Old receipt representation, load the receipt and set an unknown metadata
- data, _ := db.Get(append(oldReceiptsPrefix, hash[:]...))
- if len(data) == 0 {
- return nil, common.Hash{}, 0, 0
- }
- var receipt types.ReceiptForStorage
- err := rlp.DecodeBytes(data, &receipt)
- if err != nil {
- log.Error("Invalid receipt RLP", "hash", hash, "err", err)
- }
- return (*types.Receipt)(&receipt), common.Hash{}, 0, 0
-}
-
-// GetBloomBits retrieves the compressed bloom bit vector belonging to the given
-// section and bit index from the.
-func GetBloomBits(db DatabaseReader, bit uint, section uint64, head common.Hash) ([]byte, error) {
- key := append(append(bloomBitsPrefix, make([]byte, 10)...), head.Bytes()...)
-
- binary.BigEndian.PutUint16(key[1:], uint16(bit))
- binary.BigEndian.PutUint64(key[3:], section)
-
- return db.Get(key)
-}
-
-// WriteHeadHeaderHash stores the head header's hash.
-func WriteHeadHeaderHash(db ethdb.KeyValueWriter, hash common.Hash) error {
- if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
- log.Crit("Failed to store last header's hash", "err", err)
- }
- return nil
-}
-
-// WriteHeadFastBlockHash stores the fast head block's hash.
-func WriteHeadFastBlockHash(db ethdb.KeyValueWriter, hash common.Hash) error {
- if err := db.Put(headFastKey, hash.Bytes()); err != nil {
- log.Crit("Failed to store last fast block's hash", "err", err)
- }
- return nil
-}
-
-// WriteTrieSyncProgress stores the fast sync trie process counter to support
-// retrieving it across restarts.
-func WriteTrieSyncProgress(db ethdb.KeyValueWriter, count uint64) error {
- if err := db.Put(trieSyncKey, new(big.Int).SetUint64(count).Bytes()); err != nil {
- log.Crit("Failed to store fast sync trie progress", "err", err)
- }
- return nil
-}
-
-// WriteTd serializes the total difficulty of a block into the database.
-func WriteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64, td *big.Int) error {
- data, err := rlp.EncodeToBytes(td)
- if err != nil {
- return err
- }
- key := append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...), tdSuffix...)
- if err := db.Put(key, data); err != nil {
- log.Crit("Failed to store block total difficulty", "err", err)
- }
- return nil
-}
-
-// WriteBlockReceipts stores all the transaction receipts belonging to a block
-// as a single receipt slice. This is used during chain reorganisations for
-// rescheduling dropped transactions.
-func WriteBlockReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64, receipts types.Receipts) error {
- // Convert the receipts into their storage form and serialize them
- storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
- for i, receipt := range receipts {
- storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
- }
- bytes, err := rlp.EncodeToBytes(storageReceipts)
- if err != nil {
- return err
- }
- // Store the flattened receipt slice
- key := append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
- if err := db.Put(key, bytes); err != nil {
- log.Crit("Failed to store block receipts", "err", err)
- }
- return nil
-}
-
-// WriteTxLookupEntries stores a positional metadata for every transaction from
-// a block, enabling hash based transaction and receipt lookups.
-func WriteTxLookupEntries(db ethdb.KeyValueWriter, block *types.Block) error {
- // Iterate over each transaction and encode its metadata
- for i, tx := range block.Transactions() {
- entry := TxLookupEntry{
- BlockHash: block.Hash(),
- BlockIndex: block.NumberU64(),
- Index: uint64(i),
- }
- data, err := rlp.EncodeToBytes(entry)
- if err != nil {
- return err
- }
- if err := db.Put(append(lookupPrefix, tx.Hash().Bytes()...), data); err != nil {
- return err
- }
- }
- return nil
-}
-
-// WriteBloomBits writes the compressed bloom bits vector belonging to the given
-// section and bit index.
-func WriteBloomBits(db ethdb.KeyValueWriter, bit uint, section uint64, head common.Hash, bits []byte) {
- key := append(append(bloomBitsPrefix, make([]byte, 10)...), head.Bytes()...)
-
- binary.BigEndian.PutUint16(key[1:], uint16(bit))
- binary.BigEndian.PutUint64(key[3:], section)
-
- if err := db.Put(key, bits); err != nil {
- log.Crit("Failed to store bloom bits", "err", err)
- }
-}
-
-// DeleteCanonicalHash removes the number to hash canonical mapping.
-func DeleteCanonicalHash(db DatabaseDeleter, number uint64) {
- db.Delete(append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...))
-}
-
-// DeleteHeader removes all block header data associated with a hash.
-func DeleteHeader(db DatabaseDeleter, hash common.Hash, number uint64) {
- db.Delete(append(blockHashPrefix, hash.Bytes()...))
- db.Delete(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
-}
-
-// DeleteBody removes all block body data associated with a hash.
-func DeleteBody(db DatabaseDeleter, hash common.Hash, number uint64) {
- db.Delete(append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
-}
-
-// DeleteTd removes all block total difficulty data associated with a hash.
-func DeleteTd(db DatabaseDeleter, hash common.Hash, number uint64) {
- db.Delete(append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...), tdSuffix...))
-}
-
-// DeleteBlock removes all block data associated with a hash.
-func DeleteBlock(db DatabaseDeleter, hash common.Hash, number uint64) {
- DeleteBlockReceipts(db, hash, number)
- DeleteHeader(db, hash, number)
- DeleteBody(db, hash, number)
- DeleteTd(db, hash, number)
-}
-
-// DeleteBlockReceipts removes all receipt data associated with a block hash.
-func DeleteBlockReceipts(db DatabaseDeleter, hash common.Hash, number uint64) {
- db.Delete(append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
-}
-
-// PreimageTable returns a Database instance with the key prefix for preimage entries.
-func PreimageTable(db ethdb.Database) ethdb.Database {
- return rawdb.NewTable(db, preimagePrefix)
-}
-
-// WritePreimages writes the provided set of preimages to the database. `number` is the
-// current block number, and is used for debug messages only.
-func WritePreimages(db ethdb.Database, number uint64, preimages map[common.Hash][]byte) error {
- table := PreimageTable(db)
- batch := table.NewBatch()
- hitCount := 0
- for hash, preimage := range preimages {
- if _, err := table.Get(hash.Bytes()); err != nil {
- batch.Put(hash.Bytes(), preimage)
- hitCount++
- }
- }
- preimageCounter.Inc(int64(len(preimages)))
- preimageHitCounter.Inc(int64(hitCount))
- if hitCount > 0 {
- if err := batch.Write(); err != nil {
- return fmt.Errorf("preimage write fail for block %d: %v", number, err)
- }
- }
- return nil
-}
-
-// GetBlockChainVersion reads the version number from db.
-func GetBlockChainVersion(db DatabaseReader) int {
- var vsn uint
- enc, _ := db.Get([]byte("BlockchainVersion"))
- rlp.DecodeBytes(enc, &vsn)
- return int(vsn)
-}
-
-// WriteBlockChainVersion writes vsn as the version number to db.
-func WriteBlockChainVersion(db ethdb.KeyValueWriter, vsn int) {
- enc, _ := rlp.EncodeToBytes(uint(vsn))
- db.Put([]byte("BlockchainVersion"), enc)
-}
-
-// WriteChainConfig writes the chain config settings to the database.
-func WriteChainConfig(db ethdb.KeyValueWriter, hash common.Hash, cfg *params.ChainConfig) error {
- // short circuit and ignore if nil config. GetChainConfig
- // will return a default.
- if cfg == nil {
- return nil
- }
-
- jsonChainConfig, err := json.Marshal(cfg)
- if err != nil {
- return err
- }
-
- return db.Put(append(configPrefix, hash[:]...), jsonChainConfig)
-}
-
-// GetChainConfig will fetch the network settings based on the given hash.
-func GetChainConfig(db DatabaseReader, hash common.Hash) (*params.ChainConfig, error) {
- jsonChainConfig, _ := db.Get(append(configPrefix, hash[:]...))
- if len(jsonChainConfig) == 0 {
- return nil, ErrChainConfigNotFound
- }
-
- var config params.ChainConfig
- if err := json.Unmarshal(jsonChainConfig, &config); err != nil {
- return nil, err
- }
-
- return &config, nil
-}
-
-// FindCommonAncestor returns the last common ancestor of two block headers
-func FindCommonAncestor(db DatabaseReader, a, b *types.Header) *types.Header {
- for bn := b.Number.Uint64(); a.Number.Uint64() > bn; {
- a = GetHeader(db, a.ParentHash, a.Number.Uint64()-1)
- if a == nil {
- return nil
- }
- }
- for an := a.Number.Uint64(); an < b.Number.Uint64(); {
- b = GetHeader(db, b.ParentHash, b.Number.Uint64()-1)
- if b == nil {
- return nil
- }
- }
- for a.Hash() != b.Hash() {
- a = GetHeader(db, a.ParentHash, a.Number.Uint64()-1)
- if a == nil {
- return nil
- }
- b = GetHeader(db, b.ParentHash, b.Number.Uint64()-1)
- if b == nil {
- return nil
- }
- }
- return a
-}
diff --git a/core/database_util_test.go b/core/database_util_test.go
deleted file mode 100644
index 0c1494ba527e..000000000000
--- a/core/database_util_test.go
+++ /dev/null
@@ -1,376 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "bytes"
- "math/big"
- "testing"
-
- "github.com/XinFinOrg/XDPoSChain/common"
- "github.com/XinFinOrg/XDPoSChain/core/rawdb"
- "github.com/XinFinOrg/XDPoSChain/core/types"
- "github.com/XinFinOrg/XDPoSChain/rlp"
- "golang.org/x/crypto/sha3"
-)
-
-// Tests block header storage and retrieval operations.
-func TestHeaderStorage(t *testing.T) {
- db := rawdb.NewMemoryDatabase()
-
- // Create a test header to move around the database and make sure it's really new
- header := &types.Header{Number: big.NewInt(42), Extra: []byte("test header")}
- if entry := GetHeader(db, header.Hash(), header.Number.Uint64()); entry != nil {
- t.Fatalf("Non existent header returned: %v", entry)
- }
- // Write and verify the header in the database
- rawdb.WriteHeader(db, header)
- if entry := GetHeader(db, header.Hash(), header.Number.Uint64()); entry == nil {
- t.Fatalf("Stored header not found")
- } else if entry.Hash() != header.Hash() {
- t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, header)
- }
- if entry := GetHeaderRLP(db, header.Hash(), header.Number.Uint64()); entry == nil {
- t.Fatalf("Stored header RLP not found")
- } else {
- hasher := sha3.NewLegacyKeccak256()
- hasher.Write(entry)
-
- if hash := common.BytesToHash(hasher.Sum(nil)); hash != header.Hash() {
- t.Fatalf("Retrieved RLP header mismatch: have %v, want %v", entry, header)
- }
- }
- // Delete the header and verify the execution
- DeleteHeader(db, header.Hash(), header.Number.Uint64())
- if entry := GetHeader(db, header.Hash(), header.Number.Uint64()); entry != nil {
- t.Fatalf("Deleted header returned: %v", entry)
- }
-}
-
-// Tests block body storage and retrieval operations.
-func TestBodyStorage(t *testing.T) {
- db := rawdb.NewMemoryDatabase()
-
- // Create a test body to move around the database and make sure it's really new
- body := &types.Body{Uncles: []*types.Header{{Extra: []byte("test header")}}}
-
- hasher := sha3.NewLegacyKeccak256()
- rlp.Encode(hasher, body)
- hash := common.BytesToHash(hasher.Sum(nil))
-
- if entry := GetBody(db, hash, 0); entry != nil {
- t.Fatalf("Non existent body returned: %v", entry)
- }
- // Write and verify the body in the database
- rawdb.WriteBody(db, hash, 0, body)
- if entry := GetBody(db, hash, 0); entry == nil {
- t.Fatalf("Stored body not found")
- } else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(types.Transactions(body.Transactions)) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) {
- t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, body)
- }
- if entry := GetBodyRLP(db, hash, 0); entry == nil {
- t.Fatalf("Stored body RLP not found")
- } else {
- hasher := sha3.NewLegacyKeccak256()
- hasher.Write(entry)
-
- if calc := common.BytesToHash(hasher.Sum(nil)); calc != hash {
- t.Fatalf("Retrieved RLP body mismatch: have %v, want %v", entry, body)
- }
- }
- // Delete the body and verify the execution
- DeleteBody(db, hash, 0)
- if entry := GetBody(db, hash, 0); entry != nil {
- t.Fatalf("Deleted body returned: %v", entry)
- }
-}
-
-// Tests block storage and retrieval operations.
-func TestBlockStorage(t *testing.T) {
- db := rawdb.NewMemoryDatabase()
-
- // Create a test block to move around the database and make sure it's really new
- block := types.NewBlockWithHeader(&types.Header{
- Extra: []byte("test block"),
- UncleHash: types.EmptyUncleHash,
- TxHash: types.EmptyRootHash,
- ReceiptHash: types.EmptyRootHash,
- })
- if entry := GetBlock(db, block.Hash(), block.NumberU64()); entry != nil {
- t.Fatalf("Non existent block returned: %v", entry)
- }
- if entry := GetHeader(db, block.Hash(), block.NumberU64()); entry != nil {
- t.Fatalf("Non existent header returned: %v", entry)
- }
- if entry := GetBody(db, block.Hash(), block.NumberU64()); entry != nil {
- t.Fatalf("Non existent body returned: %v", entry)
- }
- // Write and verify the block in the database
- rawdb.WriteBlock(db, block)
- if entry := GetBlock(db, block.Hash(), block.NumberU64()); entry == nil {
- t.Fatalf("Stored block not found")
- } else if entry.Hash() != block.Hash() {
- t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
- }
- if entry := GetHeader(db, block.Hash(), block.NumberU64()); entry == nil {
- t.Fatalf("Stored header not found")
- } else if entry.Hash() != block.Header().Hash() {
- t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, block.Header())
- }
- if entry := GetBody(db, block.Hash(), block.NumberU64()); entry == nil {
- t.Fatalf("Stored body not found")
- } else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(block.Transactions()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) {
- t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, block.Body())
- }
- // Delete the block and verify the execution
- DeleteBlock(db, block.Hash(), block.NumberU64())
- if entry := GetBlock(db, block.Hash(), block.NumberU64()); entry != nil {
- t.Fatalf("Deleted block returned: %v", entry)
- }
- if entry := GetHeader(db, block.Hash(), block.NumberU64()); entry != nil {
- t.Fatalf("Deleted header returned: %v", entry)
- }
- if entry := GetBody(db, block.Hash(), block.NumberU64()); entry != nil {
- t.Fatalf("Deleted body returned: %v", entry)
- }
-}
-
-// Tests that partial block contents don't get reassembled into full blocks.
-func TestPartialBlockStorage(t *testing.T) {
- db := rawdb.NewMemoryDatabase()
- block := types.NewBlockWithHeader(&types.Header{
- Extra: []byte("test block"),
- UncleHash: types.EmptyUncleHash,
- TxHash: types.EmptyRootHash,
- ReceiptHash: types.EmptyRootHash,
- })
- // Store a header and check that it's not recognized as a block
- rawdb.WriteHeader(db, block.Header())
- if entry := GetBlock(db, block.Hash(), block.NumberU64()); entry != nil {
- t.Fatalf("Non existent block returned: %v", entry)
- }
- DeleteHeader(db, block.Hash(), block.NumberU64())
-
- // Store a body and check that it's not recognized as a block
- rawdb.WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
- if entry := GetBlock(db, block.Hash(), block.NumberU64()); entry != nil {
- t.Fatalf("Non existent block returned: %v", entry)
- }
- DeleteBody(db, block.Hash(), block.NumberU64())
-
- // Store a header and a body separately and check reassembly
- rawdb.WriteHeader(db, block.Header())
- rawdb.WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
- if entry := GetBlock(db, block.Hash(), block.NumberU64()); entry == nil {
- t.Fatalf("Stored block not found")
- } else if entry.Hash() != block.Hash() {
- t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
- }
-}
-
-// Tests block total difficulty storage and retrieval operations.
-func TestTdStorage(t *testing.T) {
- db := rawdb.NewMemoryDatabase()
-
- // Create a test TD to move around the database and make sure it's really new
- hash, td := common.Hash{}, big.NewInt(314)
- if entry := GetTd(db, hash, 0); entry != nil {
- t.Fatalf("Non existent TD returned: %v", entry)
- }
- // Write and verify the TD in the database
- if err := WriteTd(db, hash, 0, td); err != nil {
- t.Fatalf("Failed to write TD into database: %v", err)
- }
- if entry := GetTd(db, hash, 0); entry == nil {
- t.Fatalf("Stored TD not found")
- } else if entry.Cmp(td) != 0 {
- t.Fatalf("Retrieved TD mismatch: have %v, want %v", entry, td)
- }
- // Delete the TD and verify the execution
- DeleteTd(db, hash, 0)
- if entry := GetTd(db, hash, 0); entry != nil {
- t.Fatalf("Deleted TD returned: %v", entry)
- }
-}
-
-// Tests that canonical numbers can be mapped to hashes and retrieved.
-func TestCanonicalMappingStorage(t *testing.T) {
- db := rawdb.NewMemoryDatabase()
-
- // Create a test canonical number and assinged hash to move around
- hash, number := common.Hash{0: 0xff}, uint64(314)
- if entry := GetCanonicalHash(db, number); entry != (common.Hash{}) {
- t.Fatalf("Non existent canonical mapping returned: %v", entry)
- }
- // Write and verify the TD in the database
- rawdb.WriteCanonicalHash(db, hash, number)
- if entry := GetCanonicalHash(db, number); entry == (common.Hash{}) {
- t.Fatalf("Stored canonical mapping not found")
- } else if entry != hash {
- t.Fatalf("Retrieved canonical mapping mismatch: have %v, want %v", entry, hash)
- }
- // Delete the TD and verify the execution
- DeleteCanonicalHash(db, number)
- if entry := GetCanonicalHash(db, number); entry != (common.Hash{}) {
- t.Fatalf("Deleted canonical mapping returned: %v", entry)
- }
-}
-
-// Tests that head headers and head blocks can be assigned, individually.
-func TestHeadStorage(t *testing.T) {
- db := rawdb.NewMemoryDatabase()
-
- blockHead := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block header")})
- blockFull := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block full")})
- blockFast := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block fast")})
-
- // Check that no head entries are in a pristine database
- if entry := GetHeadHeaderHash(db); entry != (common.Hash{}) {
- t.Fatalf("Non head header entry returned: %v", entry)
- }
- if entry := GetHeadBlockHash(db); entry != (common.Hash{}) {
- t.Fatalf("Non head block entry returned: %v", entry)
- }
- if entry := GetHeadFastBlockHash(db); entry != (common.Hash{}) {
- t.Fatalf("Non fast head block entry returned: %v", entry)
- }
- // Assign separate entries for the head header and block
- if err := WriteHeadHeaderHash(db, blockHead.Hash()); err != nil {
- t.Fatalf("Failed to write head header hash: %v", err)
- }
- rawdb.WriteHeadBlockHash(db, blockFull.Hash())
- if err := WriteHeadFastBlockHash(db, blockFast.Hash()); err != nil {
- t.Fatalf("Failed to write fast head block hash: %v", err)
- }
- // Check that both heads are present, and different (i.e. two heads maintained)
- if entry := GetHeadHeaderHash(db); entry != blockHead.Hash() {
- t.Fatalf("Head header hash mismatch: have %v, want %v", entry, blockHead.Hash())
- }
- if entry := GetHeadBlockHash(db); entry != blockFull.Hash() {
- t.Fatalf("Head block hash mismatch: have %v, want %v", entry, blockFull.Hash())
- }
- if entry := GetHeadFastBlockHash(db); entry != blockFast.Hash() {
- t.Fatalf("Fast head block hash mismatch: have %v, want %v", entry, blockFast.Hash())
- }
-}
-
-// Tests that positional lookup metadata can be stored and retrieved.
-func TestLookupStorage(t *testing.T) {
- db := rawdb.NewMemoryDatabase()
-
- tx1 := types.NewTransaction(1, common.BytesToAddress([]byte{0x11}), big.NewInt(111), 1111, big.NewInt(11111), []byte{0x11, 0x11, 0x11})
- tx2 := types.NewTransaction(2, common.BytesToAddress([]byte{0x22}), big.NewInt(222), 2222, big.NewInt(22222), []byte{0x22, 0x22, 0x22})
- tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33})
- txs := []*types.Transaction{tx1, tx2, tx3}
-
- block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil)
-
- // Check that no transactions entries are in a pristine database
- for i, tx := range txs {
- if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn != nil {
- t.Fatalf("tx #%d [%x]: non existent transaction returned: %v", i, tx.Hash(), txn)
- }
- }
- // Insert all the transactions into the database, and verify contents
- rawdb.WriteBlock(db, block)
- if err := WriteTxLookupEntries(db, block); err != nil {
- t.Fatalf("failed to write transactions: %v", err)
- }
- for i, tx := range txs {
- if txn, hash, number, index := GetTransaction(db, tx.Hash()); txn == nil {
- t.Fatalf("tx #%d [%x]: transaction not found", i, tx.Hash())
- } else {
- if hash != block.Hash() || number != block.NumberU64() || index != uint64(i) {
- t.Fatalf("tx #%d [%x]: positional metadata mismatch: have %x/%d/%d, want %x/%v/%v", i, tx.Hash(), hash, number, index, block.Hash(), block.NumberU64(), i)
- }
- if tx.String() != txn.String() {
- t.Fatalf("tx #%d [%x]: transaction mismatch: have %v, want %v", i, tx.Hash(), txn, tx)
- }
- }
- }
- // Delete the transactions and check purge
- for i, tx := range txs {
- rawdb.DeleteTxLookupEntry(db, tx.Hash())
- if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn != nil {
- t.Fatalf("tx #%d [%x]: deleted transaction returned: %v", i, tx.Hash(), txn)
- }
- }
-}
-
-// Tests that receipts associated with a single block can be stored and retrieved.
-func TestBlockReceiptStorage(t *testing.T) {
- db := rawdb.NewMemoryDatabase()
-
- // Create a live block since we need metadata to reconstruct the receipt
- tx1 := types.NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(1), 1, big.NewInt(1), nil)
- tx2 := types.NewTransaction(2, common.HexToAddress("0x2"), big.NewInt(2), 2, big.NewInt(2), nil)
-
- receipt1 := &types.Receipt{
- Status: types.ReceiptStatusFailed,
- CumulativeGasUsed: 1,
- Logs: []*types.Log{
- {Address: common.BytesToAddress([]byte{0x11})},
- {Address: common.BytesToAddress([]byte{0x01, 0x11})},
- },
- TxHash: tx1.Hash(),
- ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}),
- GasUsed: 111111,
- }
- receipt1.Bloom = types.CreateBloom(types.Receipts{receipt1})
-
- receipt2 := &types.Receipt{
- PostState: common.Hash{2}.Bytes(),
- CumulativeGasUsed: 2,
- Logs: []*types.Log{
- {Address: common.BytesToAddress([]byte{0x22})},
- {Address: common.BytesToAddress([]byte{0x02, 0x22})},
- },
- TxHash: tx2.Hash(),
- ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}),
- GasUsed: 222222,
- }
- receipt2.Bloom = types.CreateBloom(types.Receipts{receipt2})
-
- receipts := []*types.Receipt{receipt1, receipt2}
-
- // Check that no receipt entries are in a pristine database
- hash := common.BytesToHash([]byte{0x03, 0x14})
- if rs := GetBlockReceipts(db, hash, 0); len(rs) != 0 {
- t.Fatalf("non existent receipts returned: %v", rs)
- }
- // Insert the receipt slice into the database and check presence
- if err := WriteBlockReceipts(db, hash, 0, receipts); err != nil {
- t.Fatalf("failed to write block receipts: %v", err)
- }
- if rs := GetBlockReceipts(db, hash, 0); len(rs) == 0 {
- t.Fatalf("no receipts returned")
- } else {
- for i := 0; i < len(receipts); i++ {
- rlpHave, _ := rlp.EncodeToBytes(rs[i])
- rlpWant, _ := rlp.EncodeToBytes(receipts[i])
-
- if !bytes.Equal(rlpHave, rlpWant) {
- t.Fatalf("receipt #%d: receipt mismatch: have %v, want %v", i, rs[i], receipts[i])
- }
- }
- }
- // Delete the receipt slice and check purge
- DeleteBlockReceipts(db, hash, 0)
- if rs := GetBlockReceipts(db, hash, 0); len(rs) != 0 {
- t.Fatalf("deleted receipts returned: %v", rs)
- }
-}
diff --git a/core/genesis.go b/core/genesis.go
index 2f69485c42c7..ef8541970ae0 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -160,7 +160,7 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig
}
// Just commit the new block if there is no stored genesis block.
- stored := GetCanonicalHash(db, 0)
+ stored := rawdb.ReadCanonicalHash(db, 0)
if (stored == common.Hash{}) {
if genesis == nil {
log.Info("Writing default main-net genesis block")
@@ -182,14 +182,11 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig
// Get the existing chain configuration.
newcfg := genesis.configOrDefault(stored)
- storedcfg, err := GetChainConfig(db, stored)
- if err != nil {
- if err == ErrChainConfigNotFound {
- // This case happens if a genesis write was interrupted.
- log.Warn("Found genesis block without chain config")
- err = WriteChainConfig(db, stored, newcfg)
- }
- return newcfg, stored, err
+ storedcfg, _ := rawdb.ReadChainConfig(db, stored)
+ if storedcfg == nil {
+ log.Warn("Found genesis block without chain config")
+ rawdb.WriteChainConfig(db, stored, newcfg)
+ return newcfg, stored, nil
}
// Special case: don't change the existing config of a non-xinfin chain if no new
@@ -201,15 +198,16 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig
// Check config compatibility and write the config. Compatibility errors
// are returned to the caller unless we're already at block zero.
- height := GetBlockNumber(db, GetHeadHeaderHash(db))
- if height == missingNumber {
+ height := rawdb.ReadHeaderNumber(db, rawdb.ReadHeadHeaderHash(db))
+ if height == nil {
return newcfg, stored, errors.New("missing block number for head header hash")
}
- compatErr := storedcfg.CheckCompatible(newcfg, height)
- if compatErr != nil && height != 0 && compatErr.RewindTo != 0 {
+ compatErr := storedcfg.CheckCompatible(newcfg, *height)
+ if compatErr != nil && *height != 0 && compatErr.RewindTo != 0 {
return newcfg, stored, compatErr
}
- return newcfg, stored, WriteChainConfig(db, stored, newcfg)
+ rawdb.WriteChainConfig(db, stored, newcfg)
+ return newcfg, stored, nil
}
func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
@@ -288,23 +286,17 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
if block.Number().Sign() != 0 {
return nil, errors.New("can't commit genesis block with number > 0")
}
- if err := WriteTd(db, block.Hash(), block.NumberU64(), g.Difficulty); err != nil {
- return nil, err
- }
+ rawdb.WriteTd(db, block.Hash(), block.NumberU64(), g.Difficulty)
rawdb.WriteBlock(db, block)
- if err := WriteBlockReceipts(db, block.Hash(), block.NumberU64(), nil); err != nil {
- return nil, err
- }
+ rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil)
rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64())
rawdb.WriteHeadBlockHash(db, block.Hash())
- if err := WriteHeadHeaderHash(db, block.Hash()); err != nil {
- return nil, err
- }
+ rawdb.WriteHeadHeaderHash(db, block.Hash())
config := g.Config
if config == nil {
config = params.AllEthashProtocolChanges
}
- return block, WriteChainConfig(db, block.Hash(), config)
+ return block, rawdb.WriteChainConfig(db, block.Hash(), config)
}
// MustCommit writes the genesis block and state to db, panicking on error.
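
A minimal sketch of the resulting setup flow, assuming the default main-net genesis commits cleanly to a fresh in-memory database: with no stored genesis and a nil argument, SetupGenesisBlock writes the default block and its config, which can then be read back through the new rawdb accessor.

package main

import (
	"fmt"

	"github.com/XinFinOrg/XDPoSChain/core"
	"github.com/XinFinOrg/XDPoSChain/core/rawdb"
)

func main() {
	db := rawdb.NewMemoryDatabase()

	// No stored genesis and no genesis argument: the default main-net genesis
	// block and its chain config are committed (sketch, assumes this path works
	// against a bare memory database).
	config, hash, err := core.SetupGenesisBlock(db, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("genesis hash:", hash, "config present:", config != nil)

	// The chain config written above is now visible via the rawdb accessor.
	stored, _ := rawdb.ReadChainConfig(db, hash)
	fmt.Println("stored config found:", stored != nil)
}
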
diff --git a/core/genesis_test.go b/core/genesis_test.go
index ee8f42512ac6..a05d92486550 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -156,7 +156,7 @@ func TestSetupGenesis(t *testing.T) {
t.Errorf("%s: returned hash %s, want %s", test.name, hash.Hex(), test.wantHash.Hex())
} else if err == nil {
// Check database content.
- stored := GetBlock(db, test.wantHash, 0)
+ stored := rawdb.ReadBlock(db, test.wantHash, 0)
if stored.Hash() != test.wantHash {
t.Errorf("%s: block in DB has hash %s, want %s", test.name, stored.Hash(), test.wantHash)
}
diff --git a/core/headerchain.go b/core/headerchain.go
index 633803f2faab..5f6553e24d7d 100644
--- a/core/headerchain.go
+++ b/core/headerchain.go
@@ -100,7 +100,7 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c
}
hc.currentHeader.Store(hc.genesisHeader)
- if head := GetHeadBlockHash(chainDb); head != (common.Hash{}) {
+ if head := rawdb.ReadHeadBlockHash(chainDb); head != (common.Hash{}) {
if chead := hc.GetHeaderByHash(head); chead != nil {
hc.currentHeader.Store(chead)
}
@@ -113,13 +113,14 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c
// GetBlockNumber retrieves the block number belonging to the given hash
// from the cache or database
-func (hc *HeaderChain) GetBlockNumber(hash common.Hash) uint64 {
+func (hc *HeaderChain) GetBlockNumber(hash common.Hash) *uint64 {
if cached, ok := hc.numberCache.Get(hash); ok {
- return cached
+ number := cached
+ return &number
}
- number := GetBlockNumber(hc.chainDb, hash)
- if number != missingNumber {
- hc.numberCache.Add(hash, number)
+ number := rawdb.ReadHeaderNumber(hc.chainDb, hash)
+ if number != nil {
+ hc.numberCache.Add(hash, *number)
}
return number
}
@@ -330,7 +331,7 @@ func (hc *HeaderChain) GetTd(hash common.Hash, number uint64) *big.Int {
if cached, ok := hc.tdCache.Get(hash); ok {
return cached
}
- td := GetTd(hc.chainDb, hash, number)
+ td := rawdb.ReadTd(hc.chainDb, hash, number)
if td == nil {
return nil
}
@@ -342,7 +343,11 @@ func (hc *HeaderChain) GetTd(hash common.Hash, number uint64) *big.Int {
// GetTdByHash retrieves a block's total difficulty in the canonical chain from the
// database by hash, caching it if found.
func (hc *HeaderChain) GetTdByHash(hash common.Hash) *big.Int {
- return hc.GetTd(hash, hc.GetBlockNumber(hash))
+ number := hc.GetBlockNumber(hash)
+ if number == nil {
+ return nil
+ }
+ return hc.GetTd(hash, *number)
}
// GetHeader retrieves a block header from the database by hash and number,
@@ -352,7 +357,7 @@ func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header
if header, ok := hc.headerCache.Get(hash); ok {
return header
}
- header := GetHeader(hc.chainDb, hash, number)
+ header := rawdb.ReadHeader(hc.chainDb, hash, number)
if header == nil {
return nil
}
@@ -364,7 +369,11 @@ func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header
// GetHeaderByHash retrieves a block header from the database by hash, caching it if
// found.
func (hc *HeaderChain) GetHeaderByHash(hash common.Hash) *types.Header {
- return hc.GetHeader(hash, hc.GetBlockNumber(hash))
+ number := hc.GetBlockNumber(hash)
+ if number == nil {
+ return nil
+ }
+ return hc.GetHeader(hash, *number)
}
// HasHeader checks if a block header is present in the database or not.
@@ -380,7 +389,7 @@ func (hc *HeaderChain) HasHeader(hash common.Hash, number uint64) bool {
// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header {
- hash := GetCanonicalHash(hc.chainDb, number)
+ hash := rawdb.ReadCanonicalHash(hc.chainDb, number)
if hash == (common.Hash{}) {
return nil
}
@@ -388,8 +397,7 @@ func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header {
}
func (hc *HeaderChain) GetCanonicalHash(number uint64) common.Hash {
- // TODO: return rawdb.ReadCanonicalHash(hc.chainDb, number)
- return GetCanonicalHash(hc.chainDb, number)
+ return rawdb.ReadCanonicalHash(hc.chainDb, number)
}
// CurrentHeader retrieves the current head header of the canonical chain. The
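
The hash-to-number lookup now reports absence with a nil pointer instead of the old missingNumber sentinel; a short sketch of the nil-check pattern callers need (in-memory database and a made-up hash, for illustration):

package main

import (
	"fmt"

	"github.com/XinFinOrg/XDPoSChain/common"
	"github.com/XinFinOrg/XDPoSChain/core/rawdb"
)

func main() {
	db := rawdb.NewMemoryDatabase()
	hash := common.HexToHash("0x2a") // made-up block hash

	// ReadHeaderNumber returns nil when the hash-to-number mapping is absent,
	// replacing the old missingNumber sentinel, so callers must nil-check.
	if number := rawdb.ReadHeaderNumber(db, hash); number != nil {
		header := rawdb.ReadHeader(db, hash, *number)
		fmt.Println("found header", header.Number)
	} else {
		fmt.Println("unknown header hash")
	}
}
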
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index b8cb9cbd7650..f968dcd76ef8 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -90,6 +90,19 @@ func DeleteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash) {
}
}
+// ReadHeadHeaderHash retrieves the hash of the current canonical head header.
+// The difference between this and ReadHeadBlockHash is that whereas the last
+// block hash is only updated upon a full block import, the last header hash is
+// updated already at header import, allowing head tracking for the light
+// synchronization mechanism.
+func ReadHeadHeaderHash(db ethdb.Reader) common.Hash {
+ data, _ := db.Get(headHeaderKey)
+ if len(data) == 0 {
+ return common.Hash{}
+ }
+ return common.BytesToHash(data)
+}
+
// WriteHeadHeaderHash stores the hash of the current canonical head header.
func WriteHeadHeaderHash(db ethdb.KeyValueWriter, hash common.Hash) {
if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
@@ -97,6 +110,15 @@ func WriteHeadHeaderHash(db ethdb.KeyValueWriter, hash common.Hash) {
}
}
+// ReadHeadBlockHash retrieves the hash of the current canonical head block.
+func ReadHeadBlockHash(db ethdb.KeyValueReader) common.Hash {
+ data, _ := db.Get(headBlockKey)
+ if len(data) == 0 {
+ return common.Hash{}
+ }
+ return common.BytesToHash(data)
+}
+
// WriteHeadBlockHash stores the head block's hash.
func WriteHeadBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
@@ -104,6 +126,18 @@ func WriteHeadBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
}
}
+// ReadHeadFastBlockHash retrieves the hash of the current canonical head block during
+// fast synchronization. The difference between this and ReadHeadBlockHash is that
+// whereas the last block hash is only updated upon a full block import, the last
+// fast hash is updated when importing pre-processed blocks.
+func ReadHeadFastBlockHash(db ethdb.KeyValueReader) common.Hash {
+ data, _ := db.Get(headFastBlockKey)
+ if len(data) == 0 {
+ return common.Hash{}
+ }
+ return common.BytesToHash(data)
+}
+
// WriteHeadFastBlockHash stores the hash of the current fast-sync head block.
func WriteHeadFastBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
if err := db.Put(headFastBlockKey, hash.Bytes()); err != nil {
@@ -111,6 +145,25 @@ func WriteHeadFastBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
}
}
+// ReadFastTrieProgress retrieves the number of trie nodes fast synced to allow
+// reporting correct numbers across restarts.
+func ReadFastTrieProgress(db ethdb.KeyValueReader) uint64 {
+ data, _ := db.Get(trieSyncKey)
+ if len(data) == 0 {
+ return 0
+ }
+ return new(big.Int).SetBytes(data).Uint64()
+}
+
+// WriteFastTrieProgress stores the fast sync trie process counter to support
+// retrieving it across restarts.
+func WriteFastTrieProgress(db ethdb.KeyValueWriter, count uint64) error {
+ if err := db.Put(trieSyncKey, new(big.Int).SetUint64(count).Bytes()); err != nil {
+ log.Crit("Failed to store fast sync trie progress", "err", err)
+ }
+ return nil
+}
+
// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
// First try to look up the data in ancient database. Extra hash
@@ -236,6 +289,18 @@ func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp
}
}
+// HasBody verifies the existence of a block body corresponding to the hash.
+func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool {
+ //TODO: need to add isCanon check
+ // if isCanon(db, number, hash) {
+ // return true
+ // }
+ if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil {
+ return false
+ }
+ return true
+}
+
// ReadBody retrieves the block body corresponding to the hash.
func ReadBody(db ethdb.Reader, hash common.Hash, number uint64) *types.Body {
data := ReadBodyRLP(db, hash, number)
@@ -266,6 +331,21 @@ func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
}
}
+// ReadTd retrieves a block's total difficulty corresponding to the hash, nil if
+// none found.
+func ReadTd(db ethdb.Reader, hash common.Hash, number uint64) *big.Int {
+ data, _ := db.Get(headerTDKey(number, hash))
+ if len(data) == 0 {
+ return nil
+ }
+ td := new(big.Int)
+ if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
+ log.Error("Invalid block total difficulty RLP", "hash", hash, "err", err)
+ return nil
+ }
+ return td
+}
+
// WriteTd stores the total difficulty of a block into the database.
func WriteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64, td *big.Int) {
data, err := rlp.EncodeToBytes(td)
@@ -471,8 +551,36 @@ func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64) [][]*types.Log {
return logs
}
+// ReadBlock retrieves an entire block corresponding to the hash, assembling it
+// back from the stored header and body. If either the header or body could not
+// be retrieved, nil is returned.
+//
+// Note, due to concurrent download of header and block body the header and thus
+// canonical hash can be stored in the database but the body data not (yet).
+func ReadBlock(db ethdb.Reader, hash common.Hash, number uint64) *types.Block {
+ // Retrieve the block header and body contents
+ header := ReadHeader(db, hash, number)
+ if header == nil {
+ return nil
+ }
+ body := ReadBody(db, hash, number)
+ if body == nil {
+ return nil
+ }
+ // Reassemble the block and return
+ return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles)
+}
+
// WriteBlock serializes a block into the database, header and body separately.
func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) {
WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
WriteHeader(db, block.Header())
}
+
+// DeleteBlock removes all block data associated with a hash.
+func DeleteBlock(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
+ DeleteReceipts(db, hash, number)
+ DeleteHeader(db, hash, number)
+ DeleteBody(db, hash, number)
+ DeleteTd(db, hash, number)
+}
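
A short round-trip sketch of the chain accessors added above (in-memory database, header-only block with synthetic fields):

package main

import (
	"fmt"
	"math/big"

	"github.com/XinFinOrg/XDPoSChain/core/rawdb"
	"github.com/XinFinOrg/XDPoSChain/core/types"
)

func main() {
	db := rawdb.NewMemoryDatabase()

	// Assemble a header-only block; WriteBlock stores header and body separately.
	block := types.NewBlockWithHeader(&types.Header{
		Number:      big.NewInt(1),
		Extra:       []byte("example block"),
		UncleHash:   types.EmptyUncleHash,
		TxHash:      types.EmptyRootHash,
		ReceiptHash: types.EmptyRootHash,
	})
	rawdb.WriteBlock(db, block)
	rawdb.WriteTd(db, block.Hash(), block.NumberU64(), big.NewInt(131072))

	// ReadBlock reassembles the block from the stored header and body and
	// returns nil if either half is missing.
	if stored := rawdb.ReadBlock(db, block.Hash(), block.NumberU64()); stored != nil {
		fmt.Println("block", stored.NumberU64(), "td", rawdb.ReadTd(db, block.Hash(), block.NumberU64()))
	}

	// DeleteBlock removes receipts, header, body and total difficulty together.
	rawdb.DeleteBlock(db, block.Hash(), block.NumberU64())
}
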
diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go
index 5ecd5ed6c770..d3d2b0e84a77 100644
--- a/core/rawdb/accessors_chain_test.go
+++ b/core/rawdb/accessors_chain_test.go
@@ -27,6 +27,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/params"
"github.com/XinFinOrg/XDPoSChain/rlp"
+ "golang.org/x/crypto/sha3"
)
type fullLogRLP struct {
@@ -53,6 +54,307 @@ func newFullLogRLP(l *types.Log) *fullLogRLP {
}
}
+// Tests block header storage and retrieval operations.
+func TestHeaderStorage(t *testing.T) {
+ db := NewMemoryDatabase()
+
+ // Create a test header to move around the database and make sure it's really new
+ header := &types.Header{Number: big.NewInt(42), Extra: []byte("test header")}
+ if entry := ReadHeader(db, header.Hash(), header.Number.Uint64()); entry != nil {
+ t.Fatalf("Non existent header returned: %v", entry)
+ }
+ // Write and verify the header in the database
+ WriteHeader(db, header)
+ if entry := ReadHeader(db, header.Hash(), header.Number.Uint64()); entry == nil {
+ t.Fatalf("Stored header not found")
+ } else if entry.Hash() != header.Hash() {
+ t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, header)
+ }
+ if entry := ReadHeaderRLP(db, header.Hash(), header.Number.Uint64()); entry == nil {
+ t.Fatalf("Stored header RLP not found")
+ } else {
+ hasher := sha3.NewLegacyKeccak256()
+ hasher.Write(entry)
+
+ if hash := common.BytesToHash(hasher.Sum(nil)); hash != header.Hash() {
+ t.Fatalf("Retrieved RLP header mismatch: have %v, want %v", entry, header)
+ }
+ }
+ // Delete the header and verify the execution
+ DeleteHeader(db, header.Hash(), header.Number.Uint64())
+ if entry := ReadHeader(db, header.Hash(), header.Number.Uint64()); entry != nil {
+ t.Fatalf("Deleted header returned: %v", entry)
+ }
+}
+
+// Tests block body storage and retrieval operations.
+func TestBodyStorage(t *testing.T) {
+ db := NewMemoryDatabase()
+
+ // Create a test body to move around the database and make sure it's really new
+ body := &types.Body{Uncles: []*types.Header{{Extra: []byte("test header")}}}
+
+ hasher := sha3.NewLegacyKeccak256()
+ rlp.Encode(hasher, body)
+ hash := common.BytesToHash(hasher.Sum(nil))
+
+ if entry := ReadBody(db, hash, 0); entry != nil {
+ t.Fatalf("Non existent body returned: %v", entry)
+ }
+ // Write and verify the body in the database
+ WriteBody(db, hash, 0, body)
+ if entry := ReadBody(db, hash, 0); entry == nil {
+ t.Fatalf("Stored body not found")
+ } else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(types.Transactions(body.Transactions)) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) {
+ t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, body)
+ }
+ if entry := ReadBodyRLP(db, hash, 0); entry == nil {
+ t.Fatalf("Stored body RLP not found")
+ } else {
+ hasher := sha3.NewLegacyKeccak256()
+ hasher.Write(entry)
+
+ if calc := common.BytesToHash(hasher.Sum(nil)); calc != hash {
+ t.Fatalf("Retrieved RLP body mismatch: have %v, want %v", entry, body)
+ }
+ }
+ // Delete the body and verify the execution
+ DeleteBody(db, hash, 0)
+ if entry := ReadBody(db, hash, 0); entry != nil {
+ t.Fatalf("Deleted body returned: %v", entry)
+ }
+}
+
+// Tests block storage and retrieval operations.
+func TestBlockStorage(t *testing.T) {
+ db := NewMemoryDatabase()
+
+ // Create a test block to move around the database and make sure it's really new
+ block := types.NewBlockWithHeader(&types.Header{
+ Extra: []byte("test block"),
+ UncleHash: types.EmptyUncleHash,
+ TxHash: types.EmptyRootHash,
+ ReceiptHash: types.EmptyRootHash,
+ })
+ if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil {
+ t.Fatalf("Non existent block returned: %v", entry)
+ }
+ if entry := ReadHeader(db, block.Hash(), block.NumberU64()); entry != nil {
+ t.Fatalf("Non existent header returned: %v", entry)
+ }
+ if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry != nil {
+ t.Fatalf("Non existent body returned: %v", entry)
+ }
+ // Write and verify the block in the database
+ WriteBlock(db, block)
+ if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry == nil {
+ t.Fatalf("Stored block not found")
+ } else if entry.Hash() != block.Hash() {
+ t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
+ }
+ if entry := ReadHeader(db, block.Hash(), block.NumberU64()); entry == nil {
+ t.Fatalf("Stored header not found")
+ } else if entry.Hash() != block.Header().Hash() {
+ t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, block.Header())
+ }
+ if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry == nil {
+ t.Fatalf("Stored body not found")
+ } else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(block.Transactions()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) {
+ t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, block.Body())
+ }
+ // Delete the block and verify the execution
+ DeleteBlock(db, block.Hash(), block.NumberU64())
+ if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil {
+ t.Fatalf("Deleted block returned: %v", entry)
+ }
+ if entry := ReadHeader(db, block.Hash(), block.NumberU64()); entry != nil {
+ t.Fatalf("Deleted header returned: %v", entry)
+ }
+ if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry != nil {
+ t.Fatalf("Deleted body returned: %v", entry)
+ }
+}
+
+// Tests that partial block contents don't get reassembled into full blocks.
+func TestPartialBlockStorage(t *testing.T) {
+ db := NewMemoryDatabase()
+ block := types.NewBlockWithHeader(&types.Header{
+ Extra: []byte("test block"),
+ UncleHash: types.EmptyUncleHash,
+ TxHash: types.EmptyRootHash,
+ ReceiptHash: types.EmptyRootHash,
+ })
+ // Store a header and check that it's not recognized as a block
+ WriteHeader(db, block.Header())
+ if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil {
+ t.Fatalf("Non existent block returned: %v", entry)
+ }
+ DeleteHeader(db, block.Hash(), block.NumberU64())
+
+ // Store a body and check that it's not recognized as a block
+ WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
+ if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil {
+ t.Fatalf("Non existent block returned: %v", entry)
+ }
+ DeleteBody(db, block.Hash(), block.NumberU64())
+
+ // Store a header and a body separately and check reassembly
+ WriteHeader(db, block.Header())
+ WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
+ if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry == nil {
+ t.Fatalf("Stored block not found")
+ } else if entry.Hash() != block.Hash() {
+ t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
+ }
+}
+
+// Tests block total difficulty storage and retrieval operations.
+func TestTdStorage(t *testing.T) {
+ db := NewMemoryDatabase()
+
+ // Create a test TD to move around the database and make sure it's really new
+ hash, td := common.Hash{}, big.NewInt(314)
+ if entry := ReadTd(db, hash, 0); entry != nil {
+ t.Fatalf("Non existent TD returned: %v", entry)
+ }
+ // Write and verify the TD in the database
+ WriteTd(db, hash, 0, td)
+ if entry := ReadTd(db, hash, 0); entry == nil {
+ t.Fatalf("Stored TD not found")
+ } else if entry.Cmp(td) != 0 {
+ t.Fatalf("Retrieved TD mismatch: have %v, want %v", entry, td)
+ }
+ // Delete the TD and verify the execution
+ DeleteTd(db, hash, 0)
+ if entry := ReadTd(db, hash, 0); entry != nil {
+ t.Fatalf("Deleted TD returned: %v", entry)
+ }
+}
+
+// Tests that canonical numbers can be mapped to hashes and retrieved.
+func TestCanonicalMappingStorage(t *testing.T) {
+ db := NewMemoryDatabase()
+
+ // Create a test canonical number and assigned hash to move around
+ hash, number := common.Hash{0: 0xff}, uint64(314)
+ if entry := ReadCanonicalHash(db, number); entry != (common.Hash{}) {
+ t.Fatalf("Non existent canonical mapping returned: %v", entry)
+ }
+ // Write and verify the canonical mapping in the database
+ WriteCanonicalHash(db, hash, number)
+ if entry := ReadCanonicalHash(db, number); entry == (common.Hash{}) {
+ t.Fatalf("Stored canonical mapping not found")
+ } else if entry != hash {
+ t.Fatalf("Retrieved canonical mapping mismatch: have %v, want %v", entry, hash)
+ }
+ // Delete the canonical mapping and verify the execution
+ DeleteCanonicalHash(db, number)
+ if entry := ReadCanonicalHash(db, number); entry != (common.Hash{}) {
+ t.Fatalf("Deleted canonical mapping returned: %v", entry)
+ }
+}
+
+// Tests that head headers and head blocks can be assigned, individually.
+func TestHeadStorage(t *testing.T) {
+ db := NewMemoryDatabase()
+
+ blockHead := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block header")})
+ blockFull := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block full")})
+ blockFast := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block fast")})
+
+ // Check that no head entries are in a pristine database
+ if entry := ReadHeadHeaderHash(db); entry != (common.Hash{}) {
+ t.Fatalf("Non head header entry returned: %v", entry)
+ }
+ if entry := ReadHeadBlockHash(db); entry != (common.Hash{}) {
+ t.Fatalf("Non head block entry returned: %v", entry)
+ }
+ if entry := ReadHeadFastBlockHash(db); entry != (common.Hash{}) {
+ t.Fatalf("Non fast head block entry returned: %v", entry)
+ }
+ // Assign separate entries for the head header and block
+ WriteHeadHeaderHash(db, blockHead.Hash())
+ WriteHeadBlockHash(db, blockFull.Hash())
+ WriteHeadFastBlockHash(db, blockFast.Hash())
+ // Check that both heads are present, and different (i.e. two heads maintained)
+ if entry := ReadHeadHeaderHash(db); entry != blockHead.Hash() {
+ t.Fatalf("Head header hash mismatch: have %v, want %v", entry, blockHead.Hash())
+ }
+ if entry := ReadHeadBlockHash(db); entry != blockFull.Hash() {
+ t.Fatalf("Head block hash mismatch: have %v, want %v", entry, blockFull.Hash())
+ }
+ if entry := ReadHeadFastBlockHash(db); entry != blockFast.Hash() {
+ t.Fatalf("Fast head block hash mismatch: have %v, want %v", entry, blockFast.Hash())
+ }
+}
+
+// Tests that receipts associated with a single block can be stored and retrieved.
+func TestBlockReceiptStorage(t *testing.T) {
+ db := NewMemoryDatabase()
+
+ // Create a live block since we need metadata to reconstruct the receipt
+ tx1 := types.NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(1), 1, big.NewInt(1), nil)
+ tx2 := types.NewTransaction(2, common.HexToAddress("0x2"), big.NewInt(2), 2, big.NewInt(2), nil)
+
+ body := &types.Body{Transactions: types.Transactions{tx1, tx2}}
+
+ receipt1 := &types.Receipt{
+ Status: types.ReceiptStatusFailed,
+ CumulativeGasUsed: 1,
+ Logs: []*types.Log{
+ {Address: common.BytesToAddress([]byte{0x11})},
+ {Address: common.BytesToAddress([]byte{0x01, 0x11})},
+ },
+ TxHash: tx1.Hash(),
+ ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}),
+ GasUsed: 111111,
+ }
+ receipt1.Bloom = types.CreateBloom(types.Receipts{receipt1})
+
+ receipt2 := &types.Receipt{
+ PostState: common.Hash{2}.Bytes(),
+ CumulativeGasUsed: 2,
+ Logs: []*types.Log{
+ {Address: common.BytesToAddress([]byte{0x22})},
+ {Address: common.BytesToAddress([]byte{0x02, 0x22})},
+ },
+ TxHash: tx2.Hash(),
+ ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}),
+ GasUsed: 222222,
+ }
+ receipt2.Bloom = types.CreateBloom(types.Receipts{receipt2})
+
+ receipts := []*types.Receipt{receipt1, receipt2}
+
+ // Check that no receipt entries are in a pristine database
+ hash := common.BytesToHash([]byte{0x03, 0x14})
+ if rs := ReadReceipts(db, hash, 0, params.TestChainConfig); len(rs) != 0 {
+ t.Fatalf("non existent receipts returned: %v", rs)
+ }
+ // Insert the body that corresponds to the receipts
+ WriteBody(db, hash, 0, body)
+
+ // Insert the receipt slice into the database and check presence
+ WriteReceipts(db, hash, 0, receipts)
+ if rs := ReadReceipts(db, hash, 0, params.TestChainConfig); len(rs) == 0 {
+ t.Fatalf("no receipts returned")
+ } else {
+ for i := 0; i < len(receipts); i++ {
+ rlpHave, _ := rlp.EncodeToBytes(rs[i])
+ rlpWant, _ := rlp.EncodeToBytes(receipts[i])
+
+ if !bytes.Equal(rlpHave, rlpWant) {
+ t.Fatalf("receipt #%d: receipt mismatch: have %v, want %v", i, rs[i], receipts[i])
+ }
+ }
+ }
+ // Delete the receipt slice and check purge
+ DeleteReceipts(db, hash, 0)
+ if rs := ReadReceipts(db, hash, 0, params.TestChainConfig); len(rs) != 0 {
+ t.Fatalf("deleted receipts returned: %v", rs)
+ }
+}
+
// Tests that logs associated with a single block can be retrieved.
func TestReadLogs(t *testing.T) {
db := NewMemoryDatabase()
diff --git a/core/rawdb/accessors_indexes.go b/core/rawdb/accessors_indexes.go
index 7f9409875193..21bdcf3d298f 100644
--- a/core/rawdb/accessors_indexes.go
+++ b/core/rawdb/accessors_indexes.go
@@ -21,6 +21,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/ethdb"
"github.com/XinFinOrg/XDPoSChain/log"
+ "github.com/XinFinOrg/XDPoSChain/params"
"github.com/XinFinOrg/XDPoSChain/rlp"
)
@@ -30,6 +31,23 @@ type TxLookupEntry struct {
Index uint64
}
+// ReadTxLookupEntry retrieves the positional metadata associated with a transaction
+// hash to allow retrieving the transaction or receipt by hash.
+func ReadTxLookupEntry(db ethdb.Reader, hash common.Hash) (common.Hash, uint64, uint64) {
+ // Load the positional metadata from disk and bail if it fails
+ data, _ := db.Get(txLookupKey(hash))
+ if len(data) == 0 {
+ return common.Hash{}, 0, 0
+ }
+ // Parse and return the contents of the lookup entry
+ var entry TxLookupEntry
+ if err := rlp.DecodeBytes(data, &entry); err != nil {
+ log.Error("Invalid lookup entry RLP", "hash", hash, "err", err)
+ return common.Hash{}, 0, 0
+ }
+ return entry.BlockHash, entry.BlockIndex, entry.Index
+}
+
// WriteTxLookupEntriesByBlock stores a positional metadata for every transaction from
// a block, enabling hash based transaction and receipt lookups.
func WriteTxLookupEntriesByBlock(db ethdb.KeyValueWriter, block *types.Block) {
@@ -54,3 +72,106 @@ func WriteTxLookupEntriesByBlock(db ethdb.KeyValueWriter, block *types.Block) {
func DeleteTxLookupEntry(db ethdb.KeyValueWriter, hash common.Hash) {
db.Delete(txLookupKey(hash))
}
+
+// ReadTransaction retrieves a specific transaction from the database, along with
+// its added positional metadata.
+func ReadTransaction(db ethdb.Reader, hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) {
+ // Retrieve the lookup metadata and resolve the transaction from the body
+ blockHash, blockNumber, txIndex := ReadTxLookupEntry(db, hash)
+
+ if blockHash != (common.Hash{}) {
+ body := ReadBody(db, blockHash, blockNumber)
+ if body == nil || len(body.Transactions) <= int(txIndex) {
+ log.Error("Transaction referenced missing", "number", blockNumber, "hash", blockHash, "index", txIndex)
+ return nil, common.Hash{}, 0, 0
+ }
+ return body.Transactions[txIndex], blockHash, blockNumber, txIndex
+ }
+ // Old transaction representation, load the transaction and its metadata separately
+ data, _ := db.Get(hash.Bytes())
+ if len(data) == 0 {
+ return nil, common.Hash{}, 0, 0
+ }
+ var tx types.Transaction
+ if err := rlp.DecodeBytes(data, &tx); err != nil {
+ return nil, common.Hash{}, 0, 0
+ }
+ // Retrieve the blockchain positional metadata
+ data, _ = db.Get(append(hash.Bytes(), oldTxMetaSuffix...))
+ if len(data) == 0 {
+ return nil, common.Hash{}, 0, 0
+ }
+ var entry TxLookupEntry
+ if err := rlp.DecodeBytes(data, &entry); err != nil {
+ return nil, common.Hash{}, 0, 0
+ }
+ return &tx, entry.BlockHash, entry.BlockIndex, entry.Index
+}
+
+// ReadReceipt retrieves a specific transaction receipt from the database, along with
+// its added positional metadata.
+func ReadReceipt(db ethdb.Reader, hash common.Hash, config *params.ChainConfig) (*types.Receipt, common.Hash, uint64, uint64) {
+ // Retrieve the lookup metadata and resolve the receipt from the receipts
+ blockHash, blockNumber, receiptIndex := ReadTxLookupEntry(db, hash)
+
+ if blockHash != (common.Hash{}) {
+ receipts := ReadReceipts(db, blockHash, blockNumber, config)
+ if len(receipts) <= int(receiptIndex) {
+ log.Error("Receipt refereced missing", "number", blockNumber, "hash", blockHash, "index", receiptIndex)
+ return nil, common.Hash{}, 0, 0
+ }
+ return receipts[receiptIndex], blockHash, blockNumber, receiptIndex
+ }
+ // Old receipt representation, load the receipt and return it with unknown positional metadata
+ data, _ := db.Get(append(oldReceiptsPrefix, hash[:]...))
+ if len(data) == 0 {
+ return nil, common.Hash{}, 0, 0
+ }
+ var receipt types.ReceiptForStorage
+ err := rlp.DecodeBytes(data, &receipt)
+ if err != nil {
+ log.Error("Invalid receipt RLP", "hash", hash, "err", err)
+ }
+ return (*types.Receipt)(&receipt), common.Hash{}, 0, 0
+}
+
+// ReadBloomBits retrieves the compressed bloom bit vector belonging to the given
+// section and bit index from the database.
+func ReadBloomBits(db ethdb.KeyValueReader, bit uint, section uint64, head common.Hash) ([]byte, error) {
+ return db.Get(bloomBitsKey(bit, section, head))
+}
+
+// WriteBloomBits writes the compressed bloom bits vector belonging to the given
+// section and bit index.
+func WriteBloomBits(db ethdb.KeyValueWriter, bit uint, section uint64, head common.Hash, bits []byte) {
+ if err := db.Put(bloomBitsKey(bit, section, head), bits); err != nil {
+ log.Crit("Failed to store bloom bits", "err", err)
+ }
+}
+
+// FindCommonAncestor returns the last common ancestor of two block headers
+func FindCommonAncestor(db ethdb.Reader, a, b *types.Header) *types.Header {
+ for bn := b.Number.Uint64(); a.Number.Uint64() > bn; {
+ a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
+ if a == nil {
+ return nil
+ }
+ }
+ for an := a.Number.Uint64(); an < b.Number.Uint64(); {
+ b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
+ if b == nil {
+ return nil
+ }
+ }
+ for a.Hash() != b.Hash() {
+ a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
+ if a == nil {
+ return nil
+ }
+ b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
+ if b == nil {
+ return nil
+ }
+ }
+ return a
+}
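
A sketch of the hash-based lookup path these accessors provide; the transaction hash here is made up, so on an empty database every branch simply reports absence:

package main

import (
	"fmt"

	"github.com/XinFinOrg/XDPoSChain/common"
	"github.com/XinFinOrg/XDPoSChain/core/rawdb"
	"github.com/XinFinOrg/XDPoSChain/params"
)

func main() {
	db := rawdb.NewMemoryDatabase()
	txHash := common.HexToHash("0x01") // made-up transaction hash

	// The lookup entry alone yields block hash, block number and index;
	// a zero block hash means the transaction is not indexed.
	blockHash, blockNumber, index := rawdb.ReadTxLookupEntry(db, txHash)
	fmt.Println("lookup:", blockHash, blockNumber, index)

	// ReadTransaction resolves the transaction from the stored block body ...
	if tx, _, _, _ := rawdb.ReadTransaction(db, txHash); tx != nil {
		fmt.Println("nonce:", tx.Nonce())
	}
	// ... while ReadReceipt also needs the chain config to re-derive the fields
	// that are not part of the consensus receipt encoding.
	if receipt, _, _, _ := rawdb.ReadReceipt(db, txHash, params.TestChainConfig); receipt != nil {
		fmt.Println("gas used:", receipt.GasUsed)
	}
}
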
diff --git a/core/rawdb/accessors_indexes_test.go b/core/rawdb/accessors_indexes_test.go
new file mode 100644
index 000000000000..2412515d75ee
--- /dev/null
+++ b/core/rawdb/accessors_indexes_test.go
@@ -0,0 +1,67 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "math/big"
+ "testing"
+
+ "github.com/XinFinOrg/XDPoSChain/common"
+ "github.com/XinFinOrg/XDPoSChain/core/types"
+)
+
+// Tests that positional lookup metadata can be stored and retrieved.
+func TestLookupStorage(t *testing.T) {
+ db := NewMemoryDatabase()
+
+ tx1 := types.NewTransaction(1, common.BytesToAddress([]byte{0x11}), big.NewInt(111), 1111, big.NewInt(11111), []byte{0x11, 0x11, 0x11})
+ tx2 := types.NewTransaction(2, common.BytesToAddress([]byte{0x22}), big.NewInt(222), 2222, big.NewInt(22222), []byte{0x22, 0x22, 0x22})
+ tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33})
+ txs := []*types.Transaction{tx1, tx2, tx3}
+
+ block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil)
+
+ // Check that no transactions entries are in a pristine database
+ for i, tx := range txs {
+ if txn, _, _, _ := ReadTransaction(db, tx.Hash()); txn != nil {
+ t.Fatalf("tx #%d [%x]: non existent transaction returned: %v", i, tx.Hash(), txn)
+ }
+ }
+ // Insert all the transactions into the database, and verify contents
+ WriteBlock(db, block)
+ WriteTxLookupEntriesByBlock(db, block)
+
+ for i, tx := range txs {
+ if txn, hash, number, index := ReadTransaction(db, tx.Hash()); txn == nil {
+ t.Fatalf("tx #%d [%x]: transaction not found", i, tx.Hash())
+ } else {
+ if hash != block.Hash() || number != block.NumberU64() || index != uint64(i) {
+ t.Fatalf("tx #%d [%x]: positional metadata mismatch: have %x/%d/%d, want %x/%v/%v", i, tx.Hash(), hash, number, index, block.Hash(), block.NumberU64(), i)
+ }
+ if tx.String() != txn.String() {
+ t.Fatalf("tx #%d [%x]: transaction mismatch: have %v, want %v", i, tx.Hash(), txn, tx)
+ }
+ }
+ }
+ // Delete the transactions and check purge
+ for i, tx := range txs {
+ DeleteTxLookupEntry(db, tx.Hash())
+ if txn, _, _, _ := ReadTransaction(db, tx.Hash()); txn != nil {
+ t.Fatalf("tx #%d [%x]: deleted transaction returned: %v", i, tx.Hash(), txn)
+ }
+ }
+}
diff --git a/core/rawdb/accessors_metadata.go b/core/rawdb/accessors_metadata.go
index a56ab22d7b4c..857b4c2a3e49 100644
--- a/core/rawdb/accessors_metadata.go
+++ b/core/rawdb/accessors_metadata.go
@@ -17,11 +17,67 @@
package rawdb
import (
+ "encoding/json"
+ "errors"
+
"github.com/XinFinOrg/XDPoSChain/common"
"github.com/XinFinOrg/XDPoSChain/ethdb"
"github.com/XinFinOrg/XDPoSChain/log"
+ "github.com/XinFinOrg/XDPoSChain/params"
+ "github.com/XinFinOrg/XDPoSChain/rlp"
)
+// ReadDatabaseVersion reads the version number from db.
+func ReadDatabaseVersion(db ethdb.KeyValueReader) int {
+ var vsn uint
+ enc, _ := db.Get(databaseVersionKey)
+ rlp.DecodeBytes(enc, &vsn)
+ return int(vsn)
+}
+
+// WriteDatabaseVersion writes vsn as the version number to db.
+func WriteDatabaseVersion(db ethdb.KeyValueWriter, vsn int) {
+ enc, _ := rlp.EncodeToBytes(uint(vsn))
+ db.Put(databaseVersionKey, enc)
+}
+
+// ReadChainConfig will fetch the network settings based on the given hash.
+func ReadChainConfig(db ethdb.KeyValueReader, hash common.Hash) (*params.ChainConfig, error) {
+ jsonChainConfig, _ := db.Get(configKey(hash))
+ if len(jsonChainConfig) == 0 {
+ return nil, errors.New("ChainConfig not found") // general config not found error
+ }
+
+ var config params.ChainConfig
+ if err := json.Unmarshal(jsonChainConfig, &config); err != nil {
+ return nil, err
+ }
+
+ return &config, nil
+}
+
+// WriteChainConfig writes the chain config settings to the database.
+func WriteChainConfig(db ethdb.KeyValueWriter, hash common.Hash, cfg *params.ChainConfig) error {
+ // Short circuit and ignore a nil config; ReadChainConfig will then report
+ // the config as missing.
+ if cfg == nil {
+ return nil
+ }
+
+ data, err := json.Marshal(cfg)
+ if err != nil {
+ log.Crit("Failed to JSON encode chain config", "err", err)
+ return err
+ }
+ return db.Put(configKey(hash), data)
+}
+
+// ReadPreimage retrieves a single preimage of the provided hash.
+func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte {
+ data, _ := db.Get(preimageKey(hash))
+ return data
+}
+
// WritePreimages writes the provided set of preimages to the database.
func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) {
for hash, preimage := range preimages {
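
A small usage sketch for the metadata accessors (in-memory database; the version number and preimage blob are arbitrary):

package main

import (
	"fmt"

	"github.com/XinFinOrg/XDPoSChain/common"
	"github.com/XinFinOrg/XDPoSChain/core/rawdb"
	"github.com/XinFinOrg/XDPoSChain/crypto"
)

func main() {
	db := rawdb.NewMemoryDatabase()

	// Database version: stored as an RLP-encoded uint.
	rawdb.WriteDatabaseVersion(db, 3)
	fmt.Println("db version:", rawdb.ReadDatabaseVersion(db))

	// Preimages: keyed by the Keccak256 hash of the stored blob.
	blob := []byte("example preimage")
	rawdb.WritePreimages(db, map[common.Hash][]byte{crypto.Keccak256Hash(blob): blob})
	fmt.Printf("preimage: %s\n", rawdb.ReadPreimage(db, crypto.Keccak256Hash(blob)))
}
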
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index a0a1852c7550..4136c495d05c 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -26,6 +26,9 @@ import (
// The fields below define the low level database schema prefixing.
var (
+ // databaseVersionKey tracks the current database version.
+ databaseVersionKey = []byte("DatabaseVersion")
+
// headHeaderKey tracks the latest known header's hash.
headHeaderKey = []byte("LastHeader")
@@ -35,6 +38,8 @@ var (
// headFastBlockKey tracks the latest known incomplete block's hash during fast sync.
headFastBlockKey = []byte("LastFast")
+ trieSyncKey = []byte("TrieSync")
+
// Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes).
headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
headerTDSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td
@@ -44,9 +49,20 @@ var (
blockBodyPrefix = []byte("b") // blockBodyPrefix + num (uint64 big endian) + hash -> block body
blockReceiptsPrefix = []byte("r") // blockReceiptsPrefix + num (uint64 big endian) + hash -> block receipts
- txLookupPrefix = []byte("l") // txLookupPrefix + hash -> transaction/receipt lookup metadata
+ txLookupPrefix = []byte("l") // txLookupPrefix + hash -> transaction/receipt lookup metadata
+ bloomBitsPrefix = []byte("B") // bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash -> bloom bits
+
+ // used by old db, now only used for conversion
+ oldReceiptsPrefix = []byte("receipts-")
- preimagePrefix = []byte("secure-key-") // preimagePrefix + hash -> preimage
+ preimagePrefix = []byte("secure-key-") // preimagePrefix + hash -> preimage
+ configPrefix = []byte("ethereum-config-") // config prefix for the db
+
+ // Chain index prefixes (use `i` + single byte to avoid mixing data types).
+ BloomBitsIndexPrefix = []byte("iB") // BloomBitsIndexPrefix is the data table of a chain indexer to track its progress
+
+ // used by old db, now only used for conversion
+ oldTxMetaSuffix = []byte{0x01}
preimageCounter = metrics.NewRegisteredCounter("db/preimage/total", nil)
preimageHitCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil)
@@ -108,7 +124,22 @@ func txLookupKey(hash common.Hash) []byte {
return append(txLookupPrefix, hash.Bytes()...)
}
+// bloomBitsKey = bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash
+func bloomBitsKey(bit uint, section uint64, hash common.Hash) []byte {
+ key := append(append(bloomBitsPrefix, make([]byte, 10)...), hash.Bytes()...)
+
+ binary.BigEndian.PutUint16(key[1:], uint16(bit))
+ binary.BigEndian.PutUint64(key[3:], section)
+
+ return key
+}
+
// preimageKey = preimagePrefix + hash
func preimageKey(hash common.Hash) []byte {
return append(preimagePrefix, hash.Bytes()...)
}
+
+// configKey = configPrefix + hash
+func configKey(hash common.Hash) []byte {
+ return append(configPrefix, hash.Bytes()...)
+}
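
A sketch of how the bloom-bits schema above is exercised through the public accessors, assuming bitutil round-trips a 512-byte section vector (the hash, bit index and section are arbitrary):

package main

import (
	"fmt"

	"github.com/XinFinOrg/XDPoSChain/common"
	"github.com/XinFinOrg/XDPoSChain/common/bitutil"
	"github.com/XinFinOrg/XDPoSChain/core/rawdb"
)

func main() {
	db := rawdb.NewMemoryDatabase()

	// One compressed bit vector is stored per (bit index, section, section head),
	// matching the bloomBitsKey layout documented above.
	sectionHead := common.HexToHash("0x03") // arbitrary section head hash
	bits := make([]byte, 512)               // one bloom bit row for a 4096-block section
	bits[0] = 0x80

	rawdb.WriteBloomBits(db, 7, 0, sectionHead, bitutil.CompressBytes(bits))

	comp, err := rawdb.ReadBloomBits(db, 7, 0, sectionHead)
	if err != nil {
		panic(err)
	}
	blob, _ := bitutil.DecompressBytes(comp, 512)
	fmt.Println("round-tripped length:", len(blob), "first byte:", blob[0])
}
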
diff --git a/eth/api.go b/eth/api.go
index 3c8e1bd1626b..53497cb6e78d 100644
--- a/eth/api.go
+++ b/eth/api.go
@@ -19,6 +19,7 @@ package eth
import (
"compress/gzip"
"context"
+ "errors"
"fmt"
"io"
"math/big"
@@ -28,6 +29,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/common"
"github.com/XinFinOrg/XDPoSChain/common/hexutil"
"github.com/XinFinOrg/XDPoSChain/core"
+ "github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/state"
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/log"
@@ -340,8 +342,10 @@ func NewPrivateDebugAPI(config *params.ChainConfig, eth *Ethereum) *PrivateDebug
// Preimage is a debug API function that returns the preimage for a sha3 hash, if known.
func (api *PrivateDebugAPI) Preimage(ctx context.Context, hash common.Hash) (hexutil.Bytes, error) {
- db := core.PreimageTable(api.eth.ChainDb())
- return db.Get(hash.Bytes())
+ if preimage := rawdb.ReadPreimage(api.eth.ChainDb(), hash); preimage != nil {
+ return preimage, nil
+ }
+ return nil, errors.New("unknown preimage")
}
// GetBadBLocks returns a list of the last 'bad blocks' that the client has seen on the network
diff --git a/eth/api_tracer.go b/eth/api_tracer.go
index 18c0fc010945..afa472da109c 100644
--- a/eth/api_tracer.go
+++ b/eth/api_tracer.go
@@ -31,6 +31,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/common"
"github.com/XinFinOrg/XDPoSChain/common/hexutil"
"github.com/XinFinOrg/XDPoSChain/core"
+ "github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/state"
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/core/vm"
@@ -626,7 +627,7 @@ func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (*
// and returns them as a JSON object.
func (api *PrivateDebugAPI) TraceTransaction(ctx context.Context, hash common.Hash, config *TraceConfig) (interface{}, error) {
// Retrieve the transaction and assemble its EVM context
- tx, blockHash, _, index := core.GetTransaction(api.eth.ChainDb(), hash)
+ tx, blockHash, _, index := rawdb.ReadTransaction(api.eth.ChainDb(), hash)
if tx == nil {
return nil, fmt.Errorf("transaction %x not found", hash)
}
diff --git a/eth/backend.go b/eth/backend.go
index 6a3920dcc277..cbe149390031 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -37,6 +37,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/contracts"
"github.com/XinFinOrg/XDPoSChain/core"
"github.com/XinFinOrg/XDPoSChain/core/bloombits"
+ "github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/txpool"
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/core/vm"
@@ -153,11 +154,11 @@ func New(ctx *node.ServiceContext, config *ethconfig.Config, XDCXServ *XDCx.XDCX
log.Info("Initialising Ethereum protocol", "versions", ProtocolVersions, "network", config.NetworkId)
if !config.SkipBcVersionCheck {
- bcVersion := core.GetBlockChainVersion(chainDb)
+ bcVersion := rawdb.ReadDatabaseVersion(chainDb)
if bcVersion != core.BlockChainVersion && bcVersion != 0 {
return nil, fmt.Errorf("blockchain DB version mismatch (%d / %d). Run geth upgradedb", bcVersion, core.BlockChainVersion)
}
- core.WriteBlockChainVersion(chainDb, core.BlockChainVersion)
+ rawdb.WriteDatabaseVersion(chainDb, core.BlockChainVersion)
}
var (
vmConfig = vm.Config{EnablePreimageRecording: config.EnablePreimageRecording}
@@ -180,7 +181,7 @@ func New(ctx *node.ServiceContext, config *ethconfig.Config, XDCXServ *XDCx.XDCX
if compat, ok := genesisErr.(*params.ConfigCompatError); ok {
log.Warn("Rewinding chain to upgrade configuration", "err", compat)
eth.blockchain.SetHead(compat.RewindTo)
- core.WriteChainConfig(chainDb, genesisHash, chainConfig)
+ rawdb.WriteChainConfig(chainDb, genesisHash, chainConfig)
}
eth.bloomIndexer.Start(eth.blockchain)
diff --git a/eth/bloombits.go b/eth/bloombits.go
index 097b4fb94d5b..4fa02c9d74cc 100644
--- a/eth/bloombits.go
+++ b/eth/bloombits.go
@@ -61,8 +61,8 @@ func (eth *Ethereum) startBloomHandlers() {
task := <-request
task.Bitsets = make([][]byte, len(task.Sections))
for i, section := range task.Sections {
- head := core.GetCanonicalHash(eth.chainDb, (section+1)*params.BloomBitsBlocks-1)
- if compVector, err := core.GetBloomBits(eth.chainDb, task.Bit, section, head); err == nil {
+ head := rawdb.ReadCanonicalHash(eth.chainDb, (section+1)*params.BloomBitsBlocks-1)
+ if compVector, err := rawdb.ReadBloomBits(eth.chainDb, task.Bit, section, head); err == nil {
if blob, err := bitutil.DecompressBytes(compVector, int(params.BloomBitsBlocks)/8); err == nil {
task.Bitsets[i] = blob
} else {
@@ -108,7 +108,7 @@ func NewBloomIndexer(db ethdb.Database, size uint64) *core.ChainIndexer {
db: db,
size: size,
}
- table := rawdb.NewTable(db, string(core.BloomBitsIndexPrefix))
+ table := rawdb.NewTable(db, string(rawdb.BloomBitsIndexPrefix))
return core.NewChainIndexer(db, table, backend, size, bloomConfirms, bloomThrottling, "bloombits")
}
@@ -138,7 +138,7 @@ func (b *BloomIndexer) Commit() error {
if err != nil {
return err
}
- core.WriteBloomBits(batch, uint(i), b.section, b.head, bitutil.CompressBytes(bits))
+ rawdb.WriteBloomBits(batch, uint(i), b.section, b.head, bitutil.CompressBytes(bits))
}
return batch.Write()
}
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index ec779eb07732..9f25fc905f46 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -27,7 +27,7 @@ import (
"github.com/XinFinOrg/XDPoSChain"
"github.com/XinFinOrg/XDPoSChain/common"
- "github.com/XinFinOrg/XDPoSChain/core"
+ "github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/ethdb"
"github.com/XinFinOrg/XDPoSChain/event"
@@ -230,7 +230,7 @@ func New(mode SyncMode, stateDb ethdb.Database, mux *event.TypeMux, chain BlockC
stateCh: make(chan dataPack),
stateSyncStart: make(chan *stateSync),
syncStatsState: stateSyncStats{
- processed: core.GetTrieSyncProgress(stateDb),
+ processed: rawdb.ReadFastTrieProgress(stateDb),
},
trackStateReq: make(chan *stateReq),
}
diff --git a/eth/downloader/fakepeer.go b/eth/downloader/fakepeer.go
index 9d399795d06d..e00ce68d9711 100644
--- a/eth/downloader/fakepeer.go
+++ b/eth/downloader/fakepeer.go
@@ -21,6 +21,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/common"
"github.com/XinFinOrg/XDPoSChain/core"
+ "github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/ethdb"
)
@@ -126,7 +127,7 @@ func (p *FakePeer) RequestBodies(hashes []common.Hash) error {
uncles [][]*types.Header
)
for _, hash := range hashes {
- block := core.GetBlock(p.db, hash, p.hc.GetBlockNumber(hash))
+ block := rawdb.ReadBlock(p.db, hash, *p.hc.GetBlockNumber(hash))
txs = append(txs, block.Transactions())
uncles = append(uncles, block.Uncles())
@@ -140,7 +141,7 @@ func (p *FakePeer) RequestBodies(hashes []common.Hash) error {
func (p *FakePeer) RequestReceipts(hashes []common.Hash) error {
var receipts [][]*types.Receipt
for _, hash := range hashes {
- receipts = append(receipts, core.GetBlockReceipts(p.db, hash, p.hc.GetBlockNumber(hash)))
+ receipts = append(receipts, rawdb.ReadRawReceipts(p.db, hash, *p.hc.GetBlockNumber(hash)))
}
p.dl.DeliverReceipts(p.id, receipts)
return nil
diff --git a/eth/downloader/statesync.go b/eth/downloader/statesync.go
index 608546873dc1..19abe0346bef 100644
--- a/eth/downloader/statesync.go
+++ b/eth/downloader/statesync.go
@@ -23,7 +23,7 @@ import (
"time"
"github.com/XinFinOrg/XDPoSChain/common"
- "github.com/XinFinOrg/XDPoSChain/core"
+ "github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/state"
"github.com/XinFinOrg/XDPoSChain/ethdb"
"github.com/XinFinOrg/XDPoSChain/ethdb/memorydb"
@@ -468,6 +468,6 @@ func (s *stateSync) updateStats(written, duplicate, unexpected int, duration tim
log.Info("Imported new state entries", "count", written, "elapsed", common.PrettyDuration(duration), "processed", s.d.syncStatsState.processed, "pending", s.d.syncStatsState.pending, "retry", len(s.tasks), "duplicate", s.d.syncStatsState.duplicate, "unexpected", s.d.syncStatsState.unexpected)
}
if written > 0 {
- core.WriteTrieSyncProgress(s.d.stateDB, s.d.syncStatsState.processed)
+ rawdb.WriteFastTrieProgress(s.d.stateDB, s.d.syncStatsState.processed)
}
}
diff --git a/eth/filters/bench_test.go b/eth/filters/bench_test.go
index f465d67f7069..c83568b99e8d 100644
--- a/eth/filters/bench_test.go
+++ b/eth/filters/bench_test.go
@@ -25,7 +25,6 @@ import (
"github.com/XinFinOrg/XDPoSChain/common"
"github.com/XinFinOrg/XDPoSChain/common/bitutil"
- "github.com/XinFinOrg/XDPoSChain/core"
"github.com/XinFinOrg/XDPoSChain/core/bloombits"
"github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/types"
@@ -71,20 +70,20 @@ func benchmarkBloomBits(b *testing.B, sectionSize uint64) {
if err != nil {
b.Fatalf("error opening database at %v: %v", benchDataDir, err)
}
- head := core.GetHeadBlockHash(db)
+ head := rawdb.ReadHeadBlockHash(db)
if head == (common.Hash{}) {
b.Fatalf("chain data not found at %v", benchDataDir)
}
clearBloomBits(db)
b.Log("Generating bloombits data...")
- headNum := core.GetBlockNumber(db, head)
- if headNum < sectionSize+512 {
+ headNum := rawdb.ReadHeaderNumber(db, head)
+ if headNum == nil || *headNum < sectionSize+512 {
b.Fatalf("not enough blocks for running a benchmark")
}
start := time.Now()
- cnt := (headNum - 512) / sectionSize
+ cnt := (*headNum - 512) / sectionSize
var dataSize, compSize uint64
for sectionIdx := uint64(0); sectionIdx < cnt; sectionIdx++ {
bc, err := bloombits.NewGenerator(uint(sectionSize))
@@ -93,14 +92,14 @@ func benchmarkBloomBits(b *testing.B, sectionSize uint64) {
}
var header *types.Header
for i := sectionIdx * sectionSize; i < (sectionIdx+1)*sectionSize; i++ {
- hash := core.GetCanonicalHash(db, i)
- header = core.GetHeader(db, hash, i)
+ hash := rawdb.ReadCanonicalHash(db, i)
+ header = rawdb.ReadHeader(db, hash, i)
if header == nil {
b.Fatalf("Error creating bloomBits data")
}
bc.AddBloom(uint(i-sectionIdx*sectionSize), header.Bloom)
}
- sectionHead := core.GetCanonicalHash(db, (sectionIdx+1)*sectionSize-1)
+ sectionHead := rawdb.ReadCanonicalHash(db, (sectionIdx+1)*sectionSize-1)
for i := 0; i < types.BloomBitLength; i++ {
data, err := bc.Bitset(uint(i))
if err != nil {
@@ -109,7 +108,7 @@ func benchmarkBloomBits(b *testing.B, sectionSize uint64) {
comp := bitutil.CompressBytes(data)
dataSize += uint64(len(data))
compSize += uint64(len(comp))
- core.WriteBloomBits(db, uint(i), sectionIdx, sectionHead, comp)
+ rawdb.WriteBloomBits(db, uint(i), sectionIdx, sectionHead, comp)
}
//if sectionIdx%50 == 0 {
// b.Log(" section", sectionIdx, "/", cnt)
@@ -183,11 +182,11 @@ func BenchmarkNoBloomBits(b *testing.B) {
if err != nil {
b.Fatalf("error opening database at %v: %v", benchDataDir, err)
}
- head := core.GetHeadBlockHash(db)
+ head := rawdb.ReadHeadBlockHash(db)
if head == (common.Hash{}) {
b.Fatalf("chain data not found at %v", benchDataDir)
}
- headNum := core.GetBlockNumber(db, head)
+ headNum := rawdb.ReadHeaderNumber(db, head)
clearBloomBits(db)
@@ -195,10 +194,10 @@ func BenchmarkNoBloomBits(b *testing.B) {
b.Log("Running filter benchmarks...")
start := time.Now()
- filter := sys.NewRangeFilter(0, int64(headNum), []common.Address{{}}, nil)
+ filter := sys.NewRangeFilter(0, int64(*headNum), []common.Address{{}}, nil)
filter.Logs(context.Background())
d := time.Since(start)
b.Log("Finished running filter benchmarks")
- b.Log(" ", d, "total ", d*time.Duration(1000000)/time.Duration(headNum+1), "per million blocks")
+ b.Log(" ", d, "total ", d*time.Duration(1000000)/time.Duration(*headNum+1), "per million blocks")
db.Close()
}
diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go
index ad592154efa9..052526896779 100644
--- a/eth/filters/filter_system.go
+++ b/eth/filters/filter_system.go
@@ -31,6 +31,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/common/lru"
"github.com/XinFinOrg/XDPoSChain/core"
"github.com/XinFinOrg/XDPoSChain/core/bloombits"
+ "github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/ethdb"
"github.com/XinFinOrg/XDPoSChain/event"
@@ -493,11 +494,11 @@ func (es *EventSystem) lightFilterNewHead(newHeader *types.Header, callBack func
for oldh.Hash() != newh.Hash() {
if oldh.Number.Uint64() >= newh.Number.Uint64() {
oldHeaders = append(oldHeaders, oldh)
- oldh = core.GetHeader(es.backend.ChainDb(), oldh.ParentHash, oldh.Number.Uint64()-1)
+ oldh = rawdb.ReadHeader(es.backend.ChainDb(), oldh.ParentHash, oldh.Number.Uint64()-1)
}
if oldh.Number.Uint64() < newh.Number.Uint64() {
newHeaders = append(newHeaders, newh)
- newh = core.GetHeader(es.backend.ChainDb(), newh.ParentHash, newh.Number.Uint64()-1)
+ newh = rawdb.ReadHeader(es.backend.ChainDb(), newh.ParentHash, newh.Number.Uint64()-1)
if newh == nil {
// happens when CHT syncing, nothing to do
newh = oldh
diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go
index 0d169167dbc8..b3b07243e0c4 100644
--- a/eth/filters/filter_system_test.go
+++ b/eth/filters/filter_system_test.go
@@ -60,18 +60,25 @@ func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumbe
var hash common.Hash
var num uint64
if blockNr == rpc.LatestBlockNumber {
- hash = core.GetHeadBlockHash(b.db)
- num = core.GetBlockNumber(b.db, hash)
+ hash = rawdb.ReadHeadBlockHash(b.db)
+ number := rawdb.ReadHeaderNumber(b.db, hash)
+ if number == nil {
+ return nil, nil
+ }
+ num = *number
} else {
num = uint64(blockNr)
- hash = core.GetCanonicalHash(b.db, num)
+ hash = rawdb.ReadCanonicalHash(b.db, num)
}
- return core.GetHeader(b.db, hash, num), nil
+ return rawdb.ReadHeader(b.db, hash, num), nil
}
func (b *testBackend) HeaderByHash(ctx context.Context, blockHash common.Hash) (*types.Header, error) {
- num := core.GetBlockNumber(b.db, blockHash)
- return core.GetHeader(b.db, blockHash, num), nil
+ number := rawdb.ReadHeaderNumber(b.db, blockHash)
+ if number == nil {
+ return nil, nil
+ }
+ return rawdb.ReadHeader(b.db, blockHash, *number), nil
}
func (b *testBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) {
@@ -82,8 +89,11 @@ func (b *testBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.
}
func (b *testBackend) GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) {
- number := core.GetBlockNumber(b.db, blockHash)
- return core.GetBlockReceipts(b.db, blockHash, number), nil
+
+ if number := rawdb.ReadHeaderNumber(b.db, blockHash); number != nil {
+ return rawdb.ReadReceipts(b.db, blockHash, *number, params.TestChainConfig), nil
+ }
+ return nil, nil
}
func (b *testBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) {
@@ -136,8 +146,8 @@ func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.Matc
task.Bitsets = make([][]byte, len(task.Sections))
for i, section := range task.Sections {
if rand.Int()%4 != 0 { // Handle occasional missing deliveries
- head := core.GetCanonicalHash(b.db, (section+1)*params.BloomBitsBlocks-1)
- task.Bitsets[i], _ = core.GetBloomBits(b.db, task.Bit, section, head)
+ head := rawdb.ReadCanonicalHash(b.db, (section+1)*params.BloomBitsBlocks-1)
+ task.Bitsets[i], _ = rawdb.ReadBloomBits(b.db, task.Bit, section, head)
}
}
request <- task
diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go
index 489917d17c2b..1fbe6f043e2c 100644
--- a/eth/filters/filter_test.go
+++ b/eth/filters/filter_test.go
@@ -81,9 +81,7 @@ func BenchmarkFilters(b *testing.B) {
rawdb.WriteBlock(db, block)
rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64())
rawdb.WriteHeadBlockHash(db, block.Hash())
- if err := core.WriteBlockReceipts(db, block.Hash(), block.NumberU64(), receipts[i]); err != nil {
- b.Fatal("error writing block receipts:", err)
- }
+ rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), receipts[i])
}
b.ResetTimer()
@@ -166,9 +164,7 @@ func TestFilters(t *testing.T) {
rawdb.WriteBlock(db, block)
rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64())
rawdb.WriteHeadBlockHash(db, block.Hash())
- if err := core.WriteBlockReceipts(db, block.Hash(), block.NumberU64(), receipts[i]); err != nil {
- t.Fatal("error writing block receipts:", err)
- }
+ rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), receipts[i])
}
filter := sys.NewRangeFilter(0, -1, []common.Address{addr}, [][]common.Hash{{hash1, hash2, hash3, hash4}})
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 5c9c55562b01..12085a9c697d 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -42,6 +42,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/consensus/misc/eip1559"
contractValidator "github.com/XinFinOrg/XDPoSChain/contracts/validator/contract"
"github.com/XinFinOrg/XDPoSChain/core"
+ "github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/state"
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/core/vm"
@@ -621,7 +622,7 @@ func (s *PublicBlockChainAPI) GetBalance(ctx context.Context, address common.Add
// GetTransactionAndReceiptProof returns the Trie transaction and receipt proof of the given transaction hash.
func (s *PublicBlockChainAPI) GetTransactionAndReceiptProof(ctx context.Context, hash common.Hash) (map[string]interface{}, error) {
- tx, blockHash, _, index := core.GetTransaction(s.b.ChainDb(), hash)
+ tx, blockHash, _, index := rawdb.ReadTransaction(s.b.ChainDb(), hash)
if tx == nil {
return nil, nil
}
@@ -2213,7 +2214,7 @@ func (s *PublicTransactionPoolAPI) GetTransactionCount(ctx context.Context, addr
// GetTransactionByHash returns the transaction for the given hash
func (s *PublicTransactionPoolAPI) GetTransactionByHash(ctx context.Context, hash common.Hash) (*RPCTransaction, error) {
// Try to return an already finalized transaction
- tx, blockHash, blockNumber, index := core.GetTransaction(s.b.ChainDb(), hash)
+ tx, blockHash, blockNumber, index := rawdb.ReadTransaction(s.b.ChainDb(), hash)
if tx != nil {
header, err := s.b.HeaderByHash(ctx, blockHash)
if err != nil {
@@ -2233,7 +2234,7 @@ func (s *PublicTransactionPoolAPI) GetTransactionByHash(ctx context.Context, has
// GetRawTransactionByHash returns the bytes of the transaction for the given hash.
func (s *PublicTransactionPoolAPI) GetRawTransactionByHash(ctx context.Context, hash common.Hash) (hexutil.Bytes, error) {
// Retrieve a finalized transaction, or a pooled otherwise
- tx, _, _, _ := core.GetTransaction(s.b.ChainDb(), hash)
+ tx, _, _, _ := rawdb.ReadTransaction(s.b.ChainDb(), hash)
if tx == nil {
if tx = s.b.GetPoolTransaction(hash); tx == nil {
// Transaction not found anywhere, abort
@@ -2246,7 +2247,7 @@ func (s *PublicTransactionPoolAPI) GetRawTransactionByHash(ctx context.Context,
// GetTransactionReceipt returns the transaction receipt for the given transaction hash.
func (s *PublicTransactionPoolAPI) GetTransactionReceipt(ctx context.Context, hash common.Hash) (map[string]interface{}, error) {
- tx, blockHash, blockNumber, index := core.GetTransaction(s.b.ChainDb(), hash)
+ tx, blockHash, blockNumber, index := rawdb.ReadTransaction(s.b.ChainDb(), hash)
if tx == nil {
// When the transaction doesn't exist, the RPC method should return JSON null
// as per specification.
@@ -2458,7 +2459,7 @@ func (s *PublicXDCXTransactionPoolAPI) SendLendingRawTransaction(ctx context.Con
func (s *PublicXDCXTransactionPoolAPI) GetOrderTxMatchByHash(ctx context.Context, hash common.Hash) ([]*tradingstate.OrderItem, error) {
var tx *types.Transaction
orders := []*tradingstate.OrderItem{}
- if tx, _, _, _ = core.GetTransaction(s.b.ChainDb(), hash); tx == nil {
+ if tx, _, _, _ = rawdb.ReadTransaction(s.b.ChainDb(), hash); tx == nil {
if tx = s.b.GetPoolTransaction(hash); tx == nil {
return []*tradingstate.OrderItem{}, nil
}
@@ -3189,7 +3190,7 @@ func (s *PublicXDCXTransactionPoolAPI) GetBorrows(ctx context.Context, lendingTo
// GetLendingTxMatchByHash returns lendingItems which have been processed at tx of the given txhash
func (s *PublicXDCXTransactionPoolAPI) GetLendingTxMatchByHash(ctx context.Context, hash common.Hash) ([]*lendingstate.LendingItem, error) {
var tx *types.Transaction
- if tx, _, _, _ = core.GetTransaction(s.b.ChainDb(), hash); tx == nil {
+ if tx, _, _, _ = rawdb.ReadTransaction(s.b.ChainDb(), hash); tx == nil {
if tx = s.b.GetPoolTransaction(hash); tx == nil {
return []*lendingstate.LendingItem{}, nil
}
@@ -3205,7 +3206,7 @@ func (s *PublicXDCXTransactionPoolAPI) GetLendingTxMatchByHash(ctx context.Conte
// GetLiquidatedTradesByTxHash returns trades which closed by XDCX protocol at the tx of the give hash
func (s *PublicXDCXTransactionPoolAPI) GetLiquidatedTradesByTxHash(ctx context.Context, hash common.Hash) (lendingstate.FinalizedResult, error) {
var tx *types.Transaction
- if tx, _, _, _ = core.GetTransaction(s.b.ChainDb(), hash); tx == nil {
+ if tx, _, _, _ = rawdb.ReadTransaction(s.b.ChainDb(), hash); tx == nil {
if tx = s.b.GetPoolTransaction(hash); tx == nil {
return lendingstate.FinalizedResult{}, nil
}
diff --git a/les/backend.go b/les/backend.go
index e982603d2fdb..80ec371c8cdf 100644
--- a/les/backend.go
+++ b/les/backend.go
@@ -28,6 +28,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/consensus"
"github.com/XinFinOrg/XDPoSChain/core"
"github.com/XinFinOrg/XDPoSChain/core/bloombits"
+ "github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/eth"
"github.com/XinFinOrg/XDPoSChain/eth/downloader"
@@ -123,7 +124,7 @@ func New(ctx *node.ServiceContext, config *ethconfig.Config) (*LightEthereum, er
if compat, ok := genesisErr.(*params.ConfigCompatError); ok {
log.Warn("Rewinding chain to upgrade configuration", "err", compat)
leth.blockchain.SetHead(compat.RewindTo)
- core.WriteChainConfig(chainDb, genesisHash, chainConfig)
+ rawdb.WriteChainConfig(chainDb, genesisHash, chainConfig)
}
leth.txPool = light.NewTxPool(leth.chainConfig, leth.blockchain, leth.relay)
diff --git a/les/fetcher.go b/les/fetcher.go
index 856d4978ab60..dc3f1e374e29 100644
--- a/les/fetcher.go
+++ b/les/fetcher.go
@@ -25,7 +25,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/common"
"github.com/XinFinOrg/XDPoSChain/common/mclock"
"github.com/XinFinOrg/XDPoSChain/consensus"
- "github.com/XinFinOrg/XDPoSChain/core"
+ "github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/light"
"github.com/XinFinOrg/XDPoSChain/log"
@@ -280,7 +280,7 @@ func (f *lightFetcher) announce(p *peer, head *announceData) {
// if one of root's children is canonical, keep it, delete other branches and root itself
var newRoot *fetcherTreeNode
for i, nn := range fp.root.children {
- if core.GetCanonicalHash(f.pm.chainDb, nn.number) == nn.hash {
+ if rawdb.ReadCanonicalHash(f.pm.chainDb, nn.number) == nn.hash {
fp.root.children = append(fp.root.children[:i], fp.root.children[i+1:]...)
nn.parent = nil
newRoot = nn
@@ -363,7 +363,7 @@ func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64) bo
//
// when syncing, just check if it is part of the known chain, there is nothing better we
// can do since we do not know the most recent block hash yet
- return core.GetCanonicalHash(f.pm.chainDb, fp.root.number) == fp.root.hash && core.GetCanonicalHash(f.pm.chainDb, number) == hash
+ return rawdb.ReadCanonicalHash(f.pm.chainDb, fp.root.number) == fp.root.hash && rawdb.ReadCanonicalHash(f.pm.chainDb, number) == hash
}
// requestAmount calculates the amount of headers to be downloaded starting
diff --git a/les/handler.go b/les/handler.go
index 7e7b99d4194e..34f5ad2da944 100644
--- a/les/handler.go
+++ b/les/handler.go
@@ -547,9 +547,11 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
break
}
// Retrieve the requested block body, stopping if enough was found
- if data := core.GetBodyRLP(pm.chainDb, hash, core.GetBlockNumber(pm.chainDb, hash)); len(data) != 0 {
- bodies = append(bodies, data)
- bytes += len(data)
+ if number := rawdb.ReadHeaderNumber(pm.chainDb, hash); number != nil {
+ if data := rawdb.ReadBodyRLP(pm.chainDb, hash, *number); len(data) != 0 {
+ bodies = append(bodies, data)
+ bytes += len(data)
+ }
}
}
bv, rcost := p.fcClient.RequestProcessed(costs.baseCost + uint64(reqCnt)*costs.reqCost)
@@ -598,20 +600,22 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}
for _, req := range req.Reqs {
// Retrieve the requested state entry, stopping if enough was found
- if header := core.GetHeader(pm.chainDb, req.BHash, core.GetBlockNumber(pm.chainDb, req.BHash)); header != nil {
- statedb, err := pm.blockchain.State()
- if err != nil {
- continue
- }
- account, err := pm.getAccount(statedb, header.Root, common.BytesToHash(req.AccKey))
- if err != nil {
- continue
- }
- code, _ := statedb.Database().TrieDB().Node(common.BytesToHash(account.CodeHash))
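+			// Both the stored block number and the header must exist before state is opened for the code lookup.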
+ if number := rawdb.ReadHeaderNumber(pm.chainDb, req.BHash); number != nil {
+ if header := rawdb.ReadHeader(pm.chainDb, req.BHash, *number); header != nil {
+ statedb, err := pm.blockchain.State()
+ if err != nil {
+ continue
+ }
+ account, err := pm.getAccount(statedb, header.Root, common.BytesToHash(req.AccKey))
+ if err != nil {
+ continue
+ }
+ code, _ := statedb.Database().TrieDB().Node(common.BytesToHash(account.CodeHash))
- data = append(data, code)
- if bytes += len(code); bytes >= softResponseLimit {
- break
+ data = append(data, code)
+ if bytes += len(code); bytes >= softResponseLimit {
+ break
+ }
}
}
}
@@ -664,7 +668,10 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
break
}
// Retrieve the requested block's receipts, skipping if unknown to us
- results := core.GetBlockReceipts(pm.chainDb, hash, core.GetBlockNumber(pm.chainDb, hash))
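+			// results stays nil when the block number is unknown; the nil check below handles that case.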
+ var results types.Receipts
+ if number := rawdb.ReadHeaderNumber(pm.chainDb, hash); number != nil {
+ results = rawdb.ReadRawReceipts(pm.chainDb, hash, *number)
+ }
if results == nil {
if header := pm.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
continue
@@ -724,28 +731,30 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}
for _, req := range req.Reqs {
// Retrieve the requested state entry, stopping if enough was found
- if header := core.GetHeader(pm.chainDb, req.BHash, core.GetBlockNumber(pm.chainDb, req.BHash)); header != nil {
- statedb, err := pm.blockchain.State()
- if err != nil {
- continue
- }
- var trie state.Trie
- if len(req.AccKey) > 0 {
- account, err := pm.getAccount(statedb, header.Root, common.BytesToHash(req.AccKey))
+ if number := rawdb.ReadHeaderNumber(pm.chainDb, req.BHash); number != nil {
+ if header := rawdb.ReadHeader(pm.chainDb, req.BHash, *number); header != nil {
+ statedb, err := pm.blockchain.State()
if err != nil {
continue
}
- trie, _ = statedb.Database().OpenStorageTrie(common.BytesToHash(req.AccKey), account.Root)
- } else {
- trie, _ = statedb.Database().OpenTrie(header.Root)
- }
- if trie != nil {
- var proof light.NodeList
- trie.Prove(req.Key, 0, &proof)
+ var trie state.Trie
+ if len(req.AccKey) > 0 {
+ account, err := pm.getAccount(statedb, header.Root, common.BytesToHash(req.AccKey))
+ if err != nil {
+ continue
+ }
+ trie, _ = statedb.Database().OpenStorageTrie(common.BytesToHash(req.AccKey), account.Root)
+ } else {
+ trie, _ = statedb.Database().OpenTrie(header.Root)
+ }
+ if trie != nil {
+ var proof light.NodeList
+ trie.Prove(req.Key, 0, &proof)
- proofs = append(proofs, proof)
- if bytes += proof.DataSize(); bytes >= softResponseLimit {
- break
+ proofs = append(proofs, proof)
+ if bytes += proof.DataSize(); bytes >= softResponseLimit {
+ break
+ }
}
}
}
@@ -782,9 +791,11 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
if statedb == nil || req.BHash != lastBHash {
statedb, root, lastBHash = nil, common.Hash{}, req.BHash
- if header := core.GetHeader(pm.chainDb, req.BHash, core.GetBlockNumber(pm.chainDb, req.BHash)); header != nil {
- statedb, _ = pm.blockchain.State()
- root = header.Root
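+			// statedb remains nil if either the block number or the header is missing, which the nil check below skips.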
+ if number := rawdb.ReadHeaderNumber(pm.chainDb, req.BHash); number != nil {
+ if header := rawdb.ReadHeader(pm.chainDb, req.BHash, *number); header != nil {
+ statedb, _ = pm.blockchain.State()
+ root = header.Root
+ }
}
}
if statedb == nil {
@@ -878,7 +889,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
trieDb := trie.NewDatabase(rawdb.NewTable(pm.chainDb, light.ChtTablePrefix))
for _, req := range req.Reqs {
if header := pm.blockchain.GetHeaderByNumber(req.BlockNum); header != nil {
- sectionHead := core.GetCanonicalHash(pm.chainDb, req.ChtNum*light.CHTFrequencyServer-1)
+ sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, req.ChtNum*light.CHTFrequencyServer-1)
if root := light.GetChtRoot(pm.chainDb, req.ChtNum-1, sectionHead); root != (common.Hash{}) {
trie, err := trie.New(root, trieDb)
if err != nil {
@@ -1133,10 +1144,10 @@ func (pm *ProtocolManager) getAccount(statedb *state.StateDB, root, hash common.
func (pm *ProtocolManager) getHelperTrie(id uint, idx uint64) (common.Hash, string) {
switch id {
case htCanonical:
- sectionHead := core.GetCanonicalHash(pm.chainDb, (idx+1)*light.CHTFrequencyClient-1)
+ sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, (idx+1)*light.CHTFrequencyClient-1)
return light.GetChtV2Root(pm.chainDb, idx, sectionHead), light.ChtTablePrefix
case htBloomBits:
- sectionHead := core.GetCanonicalHash(pm.chainDb, (idx+1)*light.BloomTrieFrequency-1)
+ sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, (idx+1)*light.BloomTrieFrequency-1)
return light.GetBloomTrieRoot(pm.chainDb, idx, sectionHead), light.BloomTrieTablePrefix
}
return common.Hash{}, ""
@@ -1147,8 +1158,8 @@ func (pm *ProtocolManager) getHelperTrieAuxData(req HelperTrieReq) []byte {
switch {
case req.Type == htCanonical && req.AuxReq == auxHeader && len(req.Key) == 8:
blockNum := binary.BigEndian.Uint64(req.Key)
- hash := core.GetCanonicalHash(pm.chainDb, blockNum)
- return core.GetHeaderRLP(pm.chainDb, hash, blockNum)
+ hash := rawdb.ReadCanonicalHash(pm.chainDb, blockNum)
+ return rawdb.ReadHeaderRLP(pm.chainDb, hash, blockNum)
}
return nil
}
@@ -1161,9 +1172,9 @@ func (pm *ProtocolManager) txStatus(hashes []common.Hash) []txStatus {
// If the transaction is unknown to the pool, try looking it up locally
if stat == txpool.TxStatusUnknown {
- if block, number, index := core.GetTxLookupEntry(pm.chainDb, hashes[i]); block != (common.Hash{}) {
+ if block, number, index := rawdb.ReadTxLookupEntry(pm.chainDb, hashes[i]); block != (common.Hash{}) {
stats[i].Status = txpool.TxStatusIncluded
- stats[i].Lookup = &core.TxLookupEntry{BlockHash: block, BlockIndex: number, Index: index}
+ stats[i].Lookup = &rawdb.TxLookupEntry{BlockHash: block, BlockIndex: number, Index: index}
}
}
}
diff --git a/les/handler_test.go b/les/handler_test.go
index 85d9581b3d82..609024f5a172 100644
--- a/les/handler_test.go
+++ b/les/handler_test.go
@@ -305,7 +305,7 @@ func testGetReceipt(t *testing.T, protocol int) {
block := bc.GetBlockByNumber(i)
hashes = append(hashes, block.Hash())
- receipts = append(receipts, core.GetBlockReceipts(db, block.Hash(), block.NumberU64()))
+ receipts = append(receipts, rawdb.ReadRawReceipts(db, block.Hash(), block.NumberU64()))
}
// Send the hash request and verify the response
cost := peer.GetRequestCost(GetReceiptsMsg, len(hashes))
@@ -556,9 +556,9 @@ func TestTransactionStatusLes2(t *testing.T) {
}
// check if their status is included now
- block1hash := core.GetCanonicalHash(db, 1)
- test(tx1, false, txStatus{Status: txpool.TxStatusIncluded, Lookup: &core.TxLookupEntry{BlockHash: block1hash, BlockIndex: 1, Index: 0}})
- test(tx2, false, txStatus{Status: txpool.TxStatusIncluded, Lookup: &core.TxLookupEntry{BlockHash: block1hash, BlockIndex: 1, Index: 1}})
+ block1hash := rawdb.ReadCanonicalHash(db, 1)
+ test(tx1, false, txStatus{Status: txpool.TxStatusIncluded, Lookup: &rawdb.TxLookupEntry{BlockHash: block1hash, BlockIndex: 1, Index: 0}})
+ test(tx2, false, txStatus{Status: txpool.TxStatusIncluded, Lookup: &rawdb.TxLookupEntry{BlockHash: block1hash, BlockIndex: 1, Index: 1}})
// create a reorg that rolls them back
gchain, _ = core.GenerateChain(params.TestChainConfig, chain.GetBlockByNumber(0), ethash.NewFaker(), db, 2, func(i int, block *core.BlockGen) {})
diff --git a/les/odr_requests.go b/les/odr_requests.go
index 38914156c2c1..bfc3e4a461f5 100644
--- a/les/odr_requests.go
+++ b/les/odr_requests.go
@@ -24,7 +24,7 @@ import (
"fmt"
"github.com/XinFinOrg/XDPoSChain/common"
- "github.com/XinFinOrg/XDPoSChain/core"
+ "github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/ethdb"
@@ -110,7 +110,7 @@ func (r *BlockRequest) Validate(db ethdb.Database, msg *Msg) error {
body := bodies[0]
// Retrieve our stored header and validate block content against it
- header := core.GetHeader(db, r.Hash, r.Number)
+ header := rawdb.ReadHeader(db, r.Hash, r.Number)
if header == nil {
return errHeaderUnavailable
}
@@ -166,7 +166,7 @@ func (r *ReceiptsRequest) Validate(db ethdb.Database, msg *Msg) error {
receipt := receipts[0]
// Retrieve our stored header and validate receipt content against it
- header := core.GetHeader(db, r.Hash, r.Number)
+ header := rawdb.ReadHeader(db, r.Hash, r.Number)
if header == nil {
return errHeaderUnavailable
}
diff --git a/les/odr_test.go b/les/odr_test.go
index ccff6c8ab840..74e944098210 100644
--- a/les/odr_test.go
+++ b/les/odr_test.go
@@ -65,9 +65,14 @@ func odrGetBlock(ctx context.Context, db ethdb.Database, config *params.ChainCon
func odrGetReceipts(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte {
var receipts types.Receipts
if bc != nil {
- receipts = core.GetBlockReceipts(db, bhash, core.GetBlockNumber(db, bhash))
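+		// Full-node path: resolve the block number locally before reading the receipts from the database.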
+ number := rawdb.ReadHeaderNumber(db, bhash)
+ if number != nil {
+ receipts = rawdb.ReadReceipts(db, bhash, *number, config)
+ }
} else {
- receipts, _ = light.GetBlockReceipts(ctx, lc.Odr(), bhash, core.GetBlockNumber(db, bhash))
+ if number := rawdb.ReadHeaderNumber(db, bhash); number != nil {
+ receipts, _ = light.GetBlockReceipts(ctx, lc.Odr(), bhash, *number)
+ }
}
if receipts == nil {
return nil
@@ -193,7 +198,7 @@ func testOdr(t *testing.T, protocol int, expFail uint64, fn odrTestFn) {
test := func(expFail uint64) {
for i := uint64(0); i <= pm.blockchain.CurrentHeader().Number.Uint64(); i++ {
- bhash := core.GetCanonicalHash(db, i)
+ bhash := rawdb.ReadCanonicalHash(db, i)
b1 := fn(light.NoOdr, db, pm.chainConfig, pm.blockchain.(*core.BlockChain), nil, bhash)
ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
diff --git a/les/protocol.go b/les/protocol.go
index 888b0ac1795e..42c984204185 100644
--- a/les/protocol.go
+++ b/les/protocol.go
@@ -27,7 +27,7 @@ import (
"math/big"
"github.com/XinFinOrg/XDPoSChain/common"
- "github.com/XinFinOrg/XDPoSChain/core"
+ "github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/txpool"
"github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/crypto/secp256k1"
@@ -225,6 +225,6 @@ type proofsData [][]rlp.RawValue
type txStatus struct {
Status txpool.TxStatus
- Lookup *core.TxLookupEntry `rlp:"nil"`
+ Lookup *rawdb.TxLookupEntry `rlp:"nil"`
Error string
}
diff --git a/les/request_test.go b/les/request_test.go
index dcee5dcf93a5..05b2de3385db 100644
--- a/les/request_test.go
+++ b/les/request_test.go
@@ -18,12 +18,12 @@ package les
import (
"context"
- "github.com/XinFinOrg/XDPoSChain/core/rawdb"
"testing"
"time"
+ "github.com/XinFinOrg/XDPoSChain/core/rawdb"
+
"github.com/XinFinOrg/XDPoSChain/common"
- "github.com/XinFinOrg/XDPoSChain/core"
"github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/eth"
"github.com/XinFinOrg/XDPoSChain/ethdb"
@@ -59,15 +59,22 @@ func tfReceiptsAccess(db ethdb.Database, bhash common.Hash, number uint64) light
//func TestTrieEntryAccessLes2(t *testing.T) { testAccess(t, 2, tfTrieEntryAccess) }
func tfTrieEntryAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest {
- return &light.TrieRequest{Id: light.StateTrieID(core.GetHeader(db, bhash, core.GetBlockNumber(db, bhash))), Key: testBankSecureTrieKey}
+ if number := rawdb.ReadHeaderNumber(db, bhash); number != nil {
+ return &light.TrieRequest{Id: light.StateTrieID(rawdb.ReadHeader(db, bhash, *number)), Key: testBankSecureTrieKey}
+ }
+ return nil
}
//func TestCodeAccessLes1(t *testing.T) { testAccess(t, 1, tfCodeAccess) }
//
//func TestCodeAccessLes2(t *testing.T) { testAccess(t, 2, tfCodeAccess) }
-func tfCodeAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest {
- header := core.GetHeader(db, bhash, core.GetBlockNumber(db, bhash))
+func tfCodeAccess(db ethdb.Database, bhash common.Hash, num uint64) light.OdrRequest {
+ number := rawdb.ReadHeaderNumber(db, bhash)
+	if number == nil {
+ return nil
+ }
+ header := rawdb.ReadHeader(db, bhash, *number)
if header.Number.Uint64() < testContractDeployed {
return nil
}
@@ -100,7 +107,7 @@ func testAccess(t *testing.T, protocol int, fn accessTestFn) {
test := func(expFail uint64) {
for i := uint64(0); i <= pm.blockchain.CurrentHeader().Number.Uint64(); i++ {
- bhash := core.GetCanonicalHash(db, i)
+ bhash := rawdb.ReadCanonicalHash(db, i)
if req := fn(ldb, bhash, i); req != nil {
ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
defer cancel()
diff --git a/les/server.go b/les/server.go
index db494726e332..2a3c7bfb4c12 100644
--- a/les/server.go
+++ b/les/server.go
@@ -25,6 +25,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/common"
"github.com/XinFinOrg/XDPoSChain/core"
+ "github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/eth"
"github.com/XinFinOrg/XDPoSChain/eth/ethconfig"
@@ -330,11 +331,11 @@ func (pm *ProtocolManager) blockLoop() {
header := ev.Block.Header()
hash := header.Hash()
number := header.Number.Uint64()
- td := core.GetTd(pm.chainDb, hash, number)
+ td := rawdb.ReadTd(pm.chainDb, hash, number)
if td != nil && td.Cmp(lastBroadcastTd) > 0 {
var reorg uint64
if lastHead != nil {
- reorg = lastHead.Number.Uint64() - core.FindCommonAncestor(pm.chainDb, header, lastHead).Number.Uint64()
+ reorg = lastHead.Number.Uint64() - rawdb.FindCommonAncestor(pm.chainDb, header, lastHead).Number.Uint64()
}
lastHead = header
lastBroadcastTd = td
diff --git a/les/sync.go b/les/sync.go
index 7ee733d58406..16ad7e599aef 100644
--- a/les/sync.go
+++ b/les/sync.go
@@ -20,7 +20,7 @@ import (
"context"
"time"
- "github.com/XinFinOrg/XDPoSChain/core"
+ "github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/eth/downloader"
"github.com/XinFinOrg/XDPoSChain/light"
)
@@ -61,7 +61,7 @@ func (pm *ProtocolManager) syncer() {
func (pm *ProtocolManager) needToSync(peerHead blockInfo) bool {
head := pm.blockchain.CurrentHeader()
- currentTd := core.GetTd(pm.chainDb, head.Hash(), head.Number.Uint64())
+ currentTd := rawdb.ReadTd(pm.chainDb, head.Hash(), head.Number.Uint64())
return currentTd != nil && peerHead.Td.Cmp(currentTd) > 0
}
diff --git a/light/lightchain.go b/light/lightchain.go
index 4b90cf3cca7a..6a78c6ebb42a 100644
--- a/light/lightchain.go
+++ b/light/lightchain.go
@@ -138,7 +138,7 @@ func (lc *LightChain) Odr() OdrBackend {
// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
func (lc *LightChain) loadLastState() error {
- if head := core.GetHeadHeaderHash(lc.chainDb); head == (common.Hash{}) {
+ if head := rawdb.ReadHeadHeaderHash(lc.chainDb); head == (common.Hash{}) {
// Corrupt or empty database, init from scratch
lc.Reset()
} else {
@@ -218,7 +218,11 @@ func (lc *LightChain) GetBody(ctx context.Context, hash common.Hash) (*types.Bod
if cached, ok := lc.bodyCache.Get(hash); ok && cached != nil {
return cached, nil
}
- body, err := GetBody(ctx, lc.odr, hash, lc.hc.GetBlockNumber(hash))
+	number := lc.hc.GetBlockNumber(hash)
+	if number == nil {
+		return nil, errors.New("unknown block")
+	}
+	body, err := GetBody(ctx, lc.odr, hash, *number)
if err != nil {
return nil, err
}
@@ -234,7 +238,11 @@ func (lc *LightChain) GetBodyRLP(ctx context.Context, hash common.Hash) (rlp.Raw
if cached, ok := lc.bodyRLPCache.Get(hash); ok {
return cached, nil
}
- body, err := GetBodyRLP(ctx, lc.odr, hash, lc.hc.GetBlockNumber(hash))
+ number := lc.hc.GetBlockNumber(hash)
+ if number == nil {
+ return nil, errors.New("unknown block")
+ }
+ body, err := GetBodyRLP(ctx, lc.odr, hash, *number)
if err != nil {
return nil, err
}
@@ -269,7 +277,11 @@ func (lc *LightChain) GetBlock(ctx context.Context, hash common.Hash, number uin
// GetBlockByHash retrieves a block from the database or ODR service by hash,
// caching it if found.
func (lc *LightChain) GetBlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) {
- return lc.GetBlock(ctx, hash, lc.hc.GetBlockNumber(hash))
+ number := lc.hc.GetBlockNumber(hash)
+ if number == nil {
+ return nil, errors.New("unknown block")
+ }
+ return lc.GetBlock(ctx, hash, *number)
}
// GetBlockByNumber retrieves a block from the database or ODR service by
diff --git a/light/lightchain_test.go b/light/lightchain_test.go
index 4733810e94d9..df7808b0899b 100644
--- a/light/lightchain_test.go
+++ b/light/lightchain_test.go
@@ -124,7 +124,7 @@ func testHeaderChainImport(chain []*types.Header, lightchain *LightChain) error
}
// Manually insert the header into the database, but don't reorganize (allows subsequent testing)
lightchain.chainmu.Lock()
- core.WriteTd(lightchain.chainDb, header.Hash(), header.Number.Uint64(), new(big.Int).Add(header.Difficulty, lightchain.GetTdByHash(header.ParentHash)))
+ rawdb.WriteTd(lightchain.chainDb, header.Hash(), header.Number.Uint64(), new(big.Int).Add(header.Difficulty, lightchain.GetTdByHash(header.ParentHash)))
rawdb.WriteHeader(lightchain.chainDb, header)
lightchain.chainmu.Unlock()
}
diff --git a/light/odr.go b/light/odr.go
index ee9aa9b352ab..9eed46f143d1 100644
--- a/light/odr.go
+++ b/light/odr.go
@@ -126,7 +126,7 @@ type ReceiptsRequest struct {
// StoreResult stores the retrieved data in local database
func (req *ReceiptsRequest) StoreResult(db ethdb.Database) {
- core.WriteBlockReceipts(db, req.Hash, req.Number, req.Receipts)
+ rawdb.WriteReceipts(db, req.Hash, req.Number, req.Receipts)
}
// ChtRequest is the ODR request type for state/storage trie entries
@@ -144,7 +144,7 @@ func (req *ChtRequest) StoreResult(db ethdb.Database) {
// if there is a canonical hash, there is a header too
rawdb.WriteHeader(db, req.Header)
hash, num := req.Header.Hash(), req.Header.Number.Uint64()
- core.WriteTd(db, hash, num, req.Td)
+ rawdb.WriteTd(db, hash, num, req.Td)
rawdb.WriteCanonicalHash(db, hash, num)
}
@@ -162,11 +162,11 @@ type BloomRequest struct {
// StoreResult stores the retrieved data in local database
func (req *BloomRequest) StoreResult(db ethdb.Database) {
for i, sectionIdx := range req.SectionIdxList {
- sectionHead := core.GetCanonicalHash(db, (sectionIdx+1)*BloomTrieFrequency-1)
+ sectionHead := rawdb.ReadCanonicalHash(db, (sectionIdx+1)*BloomTrieFrequency-1)
// if we don't have the canonical hash stored for this section head number, we'll still store it under
// a key with a zero sectionHead. GetBloomBits will look there too if we still don't have the canonical
// hash. In the unlikely case we've retrieved the section head hash since then, we'll just retrieve the
// bit vector again from the network.
- core.WriteBloomBits(db, req.BitIdx, sectionIdx, sectionHead, req.BloomBits[i])
+ rawdb.WriteBloomBits(db, req.BitIdx, sectionIdx, sectionHead, req.BloomBits[i])
}
}
diff --git a/light/odr_test.go b/light/odr_test.go
index 80e42ac2eeb4..29ffe84fbf9d 100644
--- a/light/odr_test.go
+++ b/light/odr_test.go
@@ -73,9 +73,15 @@ func (odr *testOdr) Retrieve(ctx context.Context, req OdrRequest) error {
}
switch req := req.(type) {
case *BlockRequest:
- req.Rlp = core.GetBodyRLP(odr.sdb, req.Hash, core.GetBlockNumber(odr.sdb, req.Hash))
+ number := rawdb.ReadHeaderNumber(odr.sdb, req.Hash)
+ if number != nil {
+ req.Rlp = rawdb.ReadBodyRLP(odr.sdb, req.Hash, *number)
+ }
case *ReceiptsRequest:
- req.Receipts = core.GetBlockReceipts(odr.sdb, req.Hash, core.GetBlockNumber(odr.sdb, req.Hash))
+ number := rawdb.ReadHeaderNumber(odr.sdb, req.Hash)
+ if number != nil {
+ req.Receipts = rawdb.ReadRawReceipts(odr.sdb, req.Hash, *number)
+ }
case *TrieRequest:
t, _ := trie.New(req.Id.Root, trie.NewDatabase(odr.sdb))
nodes := NewNodeSet()
@@ -111,9 +117,15 @@ func TestOdrGetReceiptsLes1(t *testing.T) { testChainOdr(t, 1, odrGetReceipts) }
func odrGetReceipts(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) ([]byte, error) {
var receipts types.Receipts
if bc != nil {
- receipts = core.GetBlockReceipts(db, bhash, core.GetBlockNumber(db, bhash))
+ number := rawdb.ReadHeaderNumber(db, bhash)
+ if number != nil {
+ receipts = rawdb.ReadReceipts(db, bhash, *number, bc.Config())
+ }
} else {
- receipts, _ = GetBlockReceipts(ctx, lc.Odr(), bhash, core.GetBlockNumber(db, bhash))
+ number := rawdb.ReadHeaderNumber(db, bhash)
+ if number != nil {
+ receipts, _ = GetBlockReceipts(ctx, lc.Odr(), bhash, *number)
+ }
}
if receipts == nil {
return nil, nil
@@ -276,7 +288,7 @@ func testChainOdr(t *testing.T, protocol int, fn odrTestFn) {
test := func(expFail int) {
for i := uint64(0); i <= blockchain.CurrentHeader().Number.Uint64(); i++ {
- bhash := core.GetCanonicalHash(sdb, i)
+ bhash := rawdb.ReadCanonicalHash(sdb, i)
b1, err := fn(NoOdr, sdb, blockchain, nil, bhash)
if err != nil {
t.Fatalf("error in full-node test for block %d: %v", i, err)
diff --git a/light/odr_util.go b/light/odr_util.go
index 7b94ebe7a33d..60e538448b0b 100644
--- a/light/odr_util.go
+++ b/light/odr_util.go
@@ -22,7 +22,7 @@ import (
"errors"
"github.com/XinFinOrg/XDPoSChain/common"
- "github.com/XinFinOrg/XDPoSChain/core"
+ "github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/rlp"
@@ -39,10 +39,10 @@ var errNonCanonicalHash = errors.New("hash is not currently canonical")
// given number. The returned header is proven by local CHT.
func GetHeaderByNumber(ctx context.Context, odr OdrBackend, number uint64) (*types.Header, error) {
db := odr.Database()
- hash := core.GetCanonicalHash(db, number)
+ hash := rawdb.ReadCanonicalHash(db, number)
if (hash != common.Hash{}) {
// if there is a canonical hash, there is a header too
- header := core.GetHeader(db, hash, number)
+ header := rawdb.ReadHeader(db, hash, number)
if header == nil {
panic("Canonical hash present but header not found")
}
@@ -55,14 +55,14 @@ func GetHeaderByNumber(ctx context.Context, odr OdrBackend, number uint64) (*typ
)
if odr.ChtIndexer() != nil {
chtCount, sectionHeadNum, sectionHead = odr.ChtIndexer().Sections()
- canonicalHash := core.GetCanonicalHash(db, sectionHeadNum)
+ canonicalHash := rawdb.ReadCanonicalHash(db, sectionHeadNum)
// if the CHT was injected as a trusted checkpoint, we have no canonical hash yet so we accept zero hash too
for chtCount > 0 && canonicalHash != sectionHead && canonicalHash != (common.Hash{}) {
chtCount--
if chtCount > 0 {
sectionHeadNum = chtCount*CHTFrequencyClient - 1
sectionHead = odr.ChtIndexer().SectionHead(chtCount - 1)
- canonicalHash = core.GetCanonicalHash(db, sectionHeadNum)
+ canonicalHash = rawdb.ReadCanonicalHash(db, sectionHeadNum)
}
}
}
@@ -77,7 +77,7 @@ func GetHeaderByNumber(ctx context.Context, odr OdrBackend, number uint64) (*typ
}
func GetCanonicalHash(ctx context.Context, odr OdrBackend, number uint64) (common.Hash, error) {
- hash := core.GetCanonicalHash(odr.Database(), number)
+ hash := rawdb.ReadCanonicalHash(odr.Database(), number)
if (hash != common.Hash{}) {
return hash, nil
}
@@ -90,7 +90,7 @@ func GetCanonicalHash(ctx context.Context, odr OdrBackend, number uint64) (commo
// GetBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
func GetBodyRLP(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) (rlp.RawValue, error) {
- if data := core.GetBodyRLP(odr.Database(), hash, number); data != nil {
+ if data := rawdb.ReadBodyRLP(odr.Database(), hash, number); data != nil {
return data, nil
}
r := &BlockRequest{Hash: hash, Number: number}
@@ -119,7 +119,7 @@ func GetBody(ctx context.Context, odr OdrBackend, hash common.Hash, number uint6
// back from the stored header and body.
func GetBlock(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) (*types.Block, error) {
// Retrieve the block header and body contents
- header := core.GetHeader(odr.Database(), hash, number)
+ header := rawdb.ReadHeader(odr.Database(), hash, number)
if header == nil {
return nil, errNoHeader
}
@@ -134,8 +134,8 @@ func GetBlock(ctx context.Context, odr OdrBackend, hash common.Hash, number uint
// GetBlockReceipts retrieves the receipts generated by the transactions included
// in a block given by its hash. Receipts will be filled in with context data.
func GetBlockReceipts(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) (types.Receipts, error) {
- // Retrieve the potentially incomplete receipts from disk or network
- receipts := core.GetBlockReceipts(odr.Database(), hash, number)
+ // Assume receipts are already stored locally and attempt to retrieve.
+ receipts := rawdb.ReadRawReceipts(odr.Database(), hash, number)
if receipts == nil {
header, err := GetHeaderByNumber(ctx, odr, number)
if err != nil {
@@ -156,13 +156,13 @@ func GetBlockReceipts(ctx context.Context, odr OdrBackend, hash common.Hash, num
if err != nil {
return nil, err
}
- genesis := core.GetCanonicalHash(odr.Database(), 0)
- config, _ := core.GetChainConfig(odr.Database(), genesis)
+ genesis := rawdb.ReadCanonicalHash(odr.Database(), 0)
+ config, _ := rawdb.ReadChainConfig(odr.Database(), genesis)
if err := receipts.DeriveFields(config, hash, number, block.BaseFee(), block.Transactions()); err != nil {
return nil, err
}
- core.WriteBlockReceipts(odr.Database(), hash, number, receipts)
+ rawdb.WriteReceipts(odr.Database(), hash, number, receipts)
}
return receipts, nil
}
@@ -170,7 +170,7 @@ func GetBlockReceipts(ctx context.Context, odr OdrBackend, hash common.Hash, num
// GetBlockLogs retrieves the logs generated by the transactions included in a
// block given by its hash. Logs will be filled in with context data.
func GetBlockLogs(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) ([][]*types.Log, error) {
- receipts := core.GetBlockReceipts(odr.Database(), hash, number)
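+	// Reuse GetBlockReceipts above so receipts missing locally are fetched via ODR and their contextual fields derived.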
+ receipts, _ := GetBlockReceipts(ctx, odr, hash, number)
if receipts == nil {
r := &ReceiptsRequest{Hash: hash, Number: number}
if err := odr.Retrieve(ctx, r); err != nil {
@@ -201,24 +201,24 @@ func GetBloomBits(ctx context.Context, odr OdrBackend, bitIdx uint, sectionIdxLi
)
if odr.BloomTrieIndexer() != nil {
bloomTrieCount, sectionHeadNum, sectionHead = odr.BloomTrieIndexer().Sections()
- canonicalHash := core.GetCanonicalHash(db, sectionHeadNum)
+ canonicalHash := rawdb.ReadCanonicalHash(db, sectionHeadNum)
// if the BloomTrie was injected as a trusted checkpoint, we have no canonical hash yet so we accept zero hash too
for bloomTrieCount > 0 && canonicalHash != sectionHead && canonicalHash != (common.Hash{}) {
bloomTrieCount--
if bloomTrieCount > 0 {
sectionHeadNum = bloomTrieCount*BloomTrieFrequency - 1
sectionHead = odr.BloomTrieIndexer().SectionHead(bloomTrieCount - 1)
- canonicalHash = core.GetCanonicalHash(db, sectionHeadNum)
+ canonicalHash = rawdb.ReadCanonicalHash(db, sectionHeadNum)
}
}
}
for i, sectionIdx := range sectionIdxList {
- sectionHead := core.GetCanonicalHash(db, (sectionIdx+1)*BloomTrieFrequency-1)
+ sectionHead := rawdb.ReadCanonicalHash(db, (sectionIdx+1)*BloomTrieFrequency-1)
// if we don't have the canonical hash stored for this section head number, we'll still look for
// an entry with a zero sectionHead (we store it with zero section head too if we don't know it
// at the time of the retrieval)
- bloomBits, err := core.GetBloomBits(db, bitIdx, sectionIdx, sectionHead)
+ bloomBits, err := rawdb.ReadBloomBits(db, bitIdx, sectionIdx, sectionHead)
if err == nil {
result[i] = bloomBits
} else {
diff --git a/light/postprocess.go b/light/postprocess.go
index 5f6799cf7afb..eccaa902da79 100644
--- a/light/postprocess.go
+++ b/light/postprocess.go
@@ -162,7 +162,7 @@ func (c *ChtIndexerBackend) Process(header *types.Header) {
hash, num := header.Hash(), header.Number.Uint64()
c.lastHash = hash
- td := core.GetTd(c.diskdb, hash, num)
+ td := rawdb.ReadTd(c.diskdb, hash, num)
if td == nil {
panic("ChtIndexerBackend Process: td == nil")
}
@@ -273,7 +273,7 @@ func (b *BloomTrieIndexerBackend) Commit() error {
binary.BigEndian.PutUint64(encKey[2:10], b.section)
var decomp []byte
for j := uint64(0); j < b.bloomTrieRatio; j++ {
- data, err := core.GetBloomBits(b.diskdb, i, b.section*b.bloomTrieRatio+j, b.sectionHeads[j])
+ data, err := rawdb.ReadBloomBits(b.diskdb, i, b.section*b.bloomTrieRatio+j, b.sectionHeads[j])
if err != nil {
return err
}
diff --git a/light/txpool.go b/light/txpool.go
index 7a7bf619d7f3..4b2aa814bf34 100644
--- a/light/txpool.go
+++ b/light/txpool.go
@@ -190,9 +190,7 @@ func (p *TxPool) checkMinedTxs(ctx context.Context, hash common.Hash, number uin
if _, err := GetBlockReceipts(ctx, p.odr, hash, number); err != nil { // ODR caches, ignore results
return err
}
- if err := core.WriteTxLookupEntries(p.chainDb, block); err != nil {
- return err
- }
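+	// The rawdb helper writes the lookup entries for the block's transactions and, unlike the old core helper, does not return an error.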
+ rawdb.WriteTxLookupEntriesByBlock(p.chainDb, block)
// Update the transaction pool's state
for _, tx := range list {
delete(p.pending, tx.Hash())
@@ -267,7 +265,7 @@ func (p *TxPool) reorgOnNewHead(ctx context.Context, newHeader *types.Header) (t
idx2 := idx - txPermanent
if len(p.mined) > 0 {
for i := p.clearIdx; i < idx2; i++ {
- hash := core.GetCanonicalHash(p.chainDb, i)
+ hash := rawdb.ReadCanonicalHash(p.chainDb, i)
if list, ok := p.mined[hash]; ok {
hashes := make([]common.Hash, len(list))
for i, tx := range list {