diff --git a/cmd/geth/main.go b/cmd/geth/main.go index e0f29bce77..5187971423 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -125,6 +125,7 @@ var ( utils.CachePreimagesFlag, utils.PersistDiffFlag, utils.DiffBlockFlag, + utils.PruneAncientDataFlag, utils.ListenPortFlag, utils.MaxPeersFlag, utils.MaxPendingPeersFlag, diff --git a/cmd/geth/pruneblock_test.go b/cmd/geth/pruneblock_test.go index 7d78f444fa..78b1d343b5 100644 --- a/cmd/geth/pruneblock_test.go +++ b/cmd/geth/pruneblock_test.go @@ -93,7 +93,7 @@ func testOfflineBlockPruneWithAmountReserved(t *testing.T, amountReserved uint64 t.Fatalf("Failed to back up block: %v", err) } - dbBack, err := rawdb.NewLevelDBDatabaseWithFreezer(chaindbPath, 0, 0, newAncientPath, "", false, true, false) + dbBack, err := rawdb.NewLevelDBDatabaseWithFreezer(chaindbPath, 0, 0, newAncientPath, "", false, true, false, false) if err != nil { t.Fatalf("failed to create database with ancient backend") } @@ -139,7 +139,7 @@ func testOfflineBlockPruneWithAmountReserved(t *testing.T, amountReserved uint64 func BlockchainCreator(t *testing.T, chaindbPath, AncientPath string, blockRemain uint64) (ethdb.Database, []*types.Block, []*types.Block, []types.Receipts, []*big.Int, uint64, *core.BlockChain) { //create a database with ancient freezer - db, err := rawdb.NewLevelDBDatabaseWithFreezer(chaindbPath, 0, 0, AncientPath, "", false, false, false) + db, err := rawdb.NewLevelDBDatabaseWithFreezer(chaindbPath, 0, 0, AncientPath, "", false, false, false, false) if err != nil { t.Fatalf("failed to create database with ancient backend") } diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 85d1cef887..218ecfcf3c 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -457,6 +457,10 @@ var ( Usage: "The number of blocks should be persisted in db (default = 86400)", Value: uint64(86400), } + PruneAncientDataFlag = cli.BoolFlag{ + Name: "pruneancient", + Usage: "Prune ancient data, recommended for users who do not need the ancient data. Note that once it is enabled, the pruned ancient data cannot be recovered again", + } // Miner settings MiningEnabledFlag = cli.BoolFlag{ Name: "mine", @@ -1621,6 +1625,13 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { if ctx.GlobalIsSet(DiffBlockFlag.Name) { cfg.DiffBlock = ctx.GlobalUint64(DiffBlockFlag.Name) } + if ctx.GlobalIsSet(PruneAncientDataFlag.Name) { + if cfg.SyncMode == downloader.FullSync { + cfg.PruneAncientData = ctx.GlobalBool(PruneAncientDataFlag.Name) + } else { + log.Crit("the --pruneancient flag only takes effect with --syncmode=full") + } + } if gcmode := ctx.GlobalString(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" { Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name) } @@ -1914,7 +1925,7 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly, disableFree chainDb, err = stack.OpenDatabase(name, cache, handles, "", readonly) } else { name := "chaindata" - chainDb, err = stack.OpenDatabaseWithFreezer(name, cache, handles, ctx.GlobalString(AncientFlag.Name), "", readonly, disableFreeze, false) + chainDb, err = stack.OpenDatabaseWithFreezer(name, cache, handles, ctx.GlobalString(AncientFlag.Name), "", readonly, disableFreeze, false, false) } if err != nil { Fatalf("Could not open database: %v", err) diff --git a/core/blockchain.go b/core/blockchain.go index eeaf1b7e0a..c0b73519e1 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -441,6 +441,8 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par } bc.snaps, _ = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, int(bc.cacheConfig.TriesInMemory), head.Root(), !bc.cacheConfig.SnapshotWait, true, recover) } + // write safe point block number + rawdb.WriteSafePointBlockNumber(bc.db, bc.CurrentBlock().NumberU64()) // do options before start any routine for _, option := range options { bc = option(bc) @@ -543,6 +545,7 @@ func (bc *BlockChain) loadLastState() error { log.Warn("Head block missing, resetting chain", "hash", head) return bc.Reset() } + // Everything seems to be fine, set as the head block bc.currentBlock.Store(currentBlock) headBlockGauge.Update(int64(currentBlock.NumberU64())) @@ -927,6 +930,7 @@ func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error { // Note, this function assumes that the `mu` mutex is held!
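// Editor's note (illustrative only, not part of the patch): the --pruneancient flag wired up in
// cmd/utils/flags.go above is only honored for full sync; SetEthConfig aborts with log.Crit for any
// other sync mode. Under that assumption, a typical invocation would look like:
//
//	geth --syncmode full --pruneancient
//
// Any other flags to combine with it are deployment-specific and not prescribed by this change.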
func (bc *BlockChain) writeHeadBlock(block *types.Block) { // If the block is on a side chain or an unknown one, force other heads onto it too + // read from kvdb, has nothing to do with ancientdb type updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash() // Add the block to the canonical chain number scheme and mark as the head @@ -1258,6 +1262,8 @@ func (bc *BlockChain) Stop() { log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root()) if err := triedb.Commit(recent.Root(), true, nil); err != nil { log.Error("Failed to commit recent state trie", "err", err) + } else { + rawdb.WriteSafePointBlockNumber(bc.db, recent.NumberU64()) } } } @@ -1265,6 +1271,8 @@ func (bc *BlockChain) Stop() { log.Info("Writing snapshot state to disk", "root", snapBase) if err := triedb.Commit(snapBase, true, nil); err != nil { log.Error("Failed to commit recent state trie", "err", err) + } else { + rawdb.WriteSafePointBlockNumber(bc.db, bc.CurrentBlock().NumberU64()) } } for !bc.triegc.Empty() { @@ -1764,6 +1772,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. } // Flush an entire trie and restart the counters triedb.Commit(header.Root, true, nil) + rawdb.WriteSafePointBlockNumber(bc.db, chosen) lastWrite = chosen bc.gcproc = 0 } @@ -2355,6 +2364,9 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i for i := len(hashes) - 1; i >= 0; i-- { // Append the next block to our batch block := bc.GetBlock(hashes[i], numbers[i]) + if block == nil { + log.Crit("Importing heavy sidechain block is nil", "hash", hashes[i], "number", numbers[i]) + } blocks = append(blocks, block) memory += block.Size() diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go index b85f9c9e00..8edfe76c1a 100644 --- a/core/blockchain_repair_test.go +++ b/core/blockchain_repair_test.go @@ -1762,7 +1762,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { } os.RemoveAll(datadir) - db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false, false, false) + db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false, false, false, false) if err != nil { t.Fatalf("Failed to create persistent database: %v", err) } @@ -1832,7 +1832,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { db.Close() // Start a new blockchain back up and see where the repait leads us - db, err = rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false, false, false) + db, err = rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false, false, false, false) if err != nil { t.Fatalf("Failed to reopen persistent database: %v", err) } diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go index bdcca988a3..2632da65e0 100644 --- a/core/blockchain_sethead_test.go +++ b/core/blockchain_sethead_test.go @@ -1961,7 +1961,7 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) { } os.RemoveAll(datadir) - db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false, false, false) + db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false, false, false, false) if err != nil { t.Fatalf("Failed to create persistent database: %v", err) } diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go index 530ad035dd..cbab105293 100644 --- a/core/blockchain_snapshot_test.go +++ b/core/blockchain_snapshot_test.go @@ -64,7 +64,7 
@@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo } os.RemoveAll(datadir) - db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false, false, false) + db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false, false, false, false) if err != nil { t.Fatalf("Failed to create persistent database: %v", err) } @@ -248,7 +248,7 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) { db.Close() // Start a new blockchain back up and see where the repair leads us - newdb, err := rawdb.NewLevelDBDatabaseWithFreezer(snaptest.datadir, 0, 0, snaptest.datadir, "", false, false, false) + newdb, err := rawdb.NewLevelDBDatabaseWithFreezer(snaptest.datadir, 0, 0, snaptest.datadir, "", false, false, false, false) if err != nil { t.Fatalf("Failed to reopen persistent database: %v", err) } diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 07cb51933a..f7a4ff53f1 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -761,7 +761,7 @@ func TestFastVsFullChains(t *testing.T) { t.Fatalf("failed to create temp freezer dir: %v", err) } defer os.Remove(frdir) - ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false) + ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -835,7 +835,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) { t.Fatalf("failed to create temp freezer dir: %v", err) } defer os.Remove(dir) - db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), dir, "", false, false, false) + db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), dir, "", false, false, false, false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -1702,7 +1702,7 @@ func TestBlockchainRecovery(t *testing.T) { } defer os.Remove(frdir) - ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false) + ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -1759,7 +1759,7 @@ func TestIncompleteAncientReceiptChainInsertion(t *testing.T) { t.Fatalf("failed to create temp freezer dir: %v", err) } defer os.Remove(frdir) - ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false) + ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -1958,7 +1958,7 @@ func testInsertKnownChainData(t *testing.T, typ string) { t.Fatalf("failed to create temp freezer dir: %v", err) } defer os.Remove(dir) - chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), dir, "", false, false, false) + chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), dir, "", false, false, false, false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -2238,7 +2238,7 @@ func TestTransactionIndices(t *testing.T) { t.Fatalf("failed to create temp freezer dir: %v", err) } defer os.Remove(frdir) - ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false) + ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false) if err 
!= nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -2266,7 +2266,7 @@ func TestTransactionIndices(t *testing.T) { // Init block chain with external ancients, check all needed indices has been indexed. limit := []uint64{0, 32, 64, 128} for _, l := range limit { - ancientDb, err = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false) + ancientDb, err = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -2286,7 +2286,7 @@ func TestTransactionIndices(t *testing.T) { } // Reconstruct a block chain which only reserves HEAD-64 tx indices - ancientDb, err = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false) + ancientDb, err = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -2365,7 +2365,7 @@ func TestSkipStaleTxIndicesInFastSync(t *testing.T) { t.Fatalf("failed to create temp freezer dir: %v", err) } defer os.Remove(frdir) - ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false) + ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } diff --git a/core/headerchain.go b/core/headerchain.go index 3e50c1eb07..2849468dee 100644 --- a/core/headerchain.go +++ b/core/headerchain.go @@ -250,6 +250,7 @@ func (hc *HeaderChain) writeHeaders(headers []*types.Header) (result *headerWrit headHeader = hc.GetHeader(headHash, headNumber) ) for rawdb.ReadCanonicalHash(hc.chainDb, headNumber) != headHash { + // backtracking to ancientdb if frozen, _ := hc.chainDb.Ancients(); frozen == headNumber { break } diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go index f99511e456..b38c843dc6 100644 --- a/core/rawdb/accessors_chain_test.go +++ b/core/rawdb/accessors_chain_test.go @@ -440,7 +440,7 @@ func TestAncientStorage(t *testing.T) { } defer os.Remove(frdir) - db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false, false, false) + db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false, false, false, false) if err != nil { t.Fatalf("failed to create database with ancient backend") } diff --git a/core/rawdb/accessors_metadata.go b/core/rawdb/accessors_metadata.go index 079e335fa6..fbceab6141 100644 --- a/core/rawdb/accessors_metadata.go +++ b/core/rawdb/accessors_metadata.go @@ -18,6 +18,7 @@ package rawdb import ( "encoding/json" + "math/big" "time" "github.com/ethereum/go-ethereum/common" @@ -27,6 +28,12 @@ import ( "github.com/ethereum/go-ethereum/rlp" ) +// FreezerType enumerator +const ( + EntireFreezerType uint64 = iota // classic ancient type + PruneFreezerType // prune ancient type +) + // ReadDatabaseVersion retrieves the version number of the database. 
func ReadDatabaseVersion(db ethdb.KeyValueReader) *uint64 { var version uint64 @@ -138,3 +145,83 @@ func PopUncleanShutdownMarker(db ethdb.KeyValueStore) { log.Warn("Failed to clear unclean-shutdown marker", "err", err) } } + +// ReadOffSetOfCurrentAncientFreezer returns the prune start block of the current ancient freezer +func ReadOffSetOfCurrentAncientFreezer(db ethdb.KeyValueReader) uint64 { + offset, _ := db.Get(offSetOfCurrentAncientFreezer) + if offset == nil { + return 0 + } + return new(big.Int).SetBytes(offset).Uint64() } + +// WriteOffSetOfCurrentAncientFreezer writes the prune start block of the current ancient freezer +func WriteOffSetOfCurrentAncientFreezer(db ethdb.KeyValueWriter, offset uint64) { + if err := db.Put(offSetOfCurrentAncientFreezer, new(big.Int).SetUint64(offset).Bytes()); err != nil { + log.Crit("Failed to store offSetOfCurrentAncientFreezer", "err", err) + } +} + +// ReadOffSetOfLastAncientFreezer returns the prune start block of the previous ancient freezer +func ReadOffSetOfLastAncientFreezer(db ethdb.KeyValueReader) uint64 { + offset, _ := db.Get(offSetOfLastAncientFreezer) + if offset == nil { + return 0 + } + return new(big.Int).SetBytes(offset).Uint64() +} + +// WriteOffSetOfLastAncientFreezer writes the prune start block of the previous ancient freezer +func WriteOffSetOfLastAncientFreezer(db ethdb.KeyValueWriter, offset uint64) { + if err := db.Put(offSetOfLastAncientFreezer, new(big.Int).SetUint64(offset).Bytes()); err != nil { + log.Crit("Failed to store offSetOfLastAncientFreezer", "err", err) + } +} + +// ReadFrozenOfAncientFreezer returns the next frozen block number of the ancient freezer +func ReadFrozenOfAncientFreezer(db ethdb.KeyValueReader) uint64 { + frozen, _ := db.Get(frozenOfAncientDBKey) + if frozen == nil { + return 0 + } + return new(big.Int).SetBytes(frozen).Uint64() +} + +// WriteFrozenOfAncientFreezer writes the next frozen block number of the ancient freezer +func WriteFrozenOfAncientFreezer(db ethdb.KeyValueWriter, frozen uint64) { + if err := db.Put(frozenOfAncientDBKey, new(big.Int).SetUint64(frozen).Bytes()); err != nil { + log.Crit("Failed to store the frozen number of ancientDB", "err", err) + } +} + +// ReadSafePointBlockNumber returns the number of the last block whose state root was written to disk +func ReadSafePointBlockNumber(db ethdb.KeyValueReader) uint64 { + num, _ := db.Get(LastSafePointBlockKey) + if num == nil { + return 0 + } + return new(big.Int).SetBytes(num).Uint64() +} + +// WriteSafePointBlockNumber writes the number of the last block whose state root was written to disk +func WriteSafePointBlockNumber(db ethdb.KeyValueWriter, number uint64) { + if err := db.Put(LastSafePointBlockKey, new(big.Int).SetUint64(number).Bytes()); err != nil { + log.Crit("Failed to store the safe point block number", "err", err) + } +} + +// ReadAncientType returns the freezer type +func ReadAncientType(db ethdb.KeyValueReader) uint64 { + data, _ := db.Get(pruneAncientKey) + if data == nil { + return EntireFreezerType + } + return new(big.Int).SetBytes(data).Uint64() +} + +// WriteAncientType writes the freezer type +func WriteAncientType(db ethdb.KeyValueWriter, flag uint64) { + if err := db.Put(pruneAncientKey, new(big.Int).SetUint64(flag).Bytes()); err != nil { + log.Crit("Failed to store the ancient type", "err", err) + } +} diff --git a/core/rawdb/chain_iterator.go b/core/rawdb/chain_iterator.go index 883e17b782..2fbd637ff6 100644 --- a/core/rawdb/chain_iterator.go +++ b/core/rawdb/chain_iterator.go @@ -181,6 +181,9 @@ func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool // signal received.
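// Editor's note (illustrative sketch, not part of the patch): the metadata helpers added above all
// round-trip a uint64 through big-endian big.Int bytes and fall back to a zero value (or
// EntireFreezerType) when the key is absent. Assuming the in-memory database that the rawdb tests
// already use, usage looks roughly like:
//
//	db := rawdb.NewMemoryDatabase()
//	rawdb.WriteSafePointBlockNumber(db, 1024)
//	n := rawdb.ReadSafePointBlockNumber(db) // n == 1024; 0 if never written
//	rawdb.WriteAncientType(db, rawdb.PruneFreezerType)
//	ft := rawdb.ReadAncientType(db)         // EntireFreezerType if never written
//
// (Inside package rawdb itself the calls would of course drop the package qualifier.)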
func indexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) { // short circuit for invalid range + if offset := db.AncientOffSet(); offset > from { + from = offset + } if from >= to { return } @@ -272,6 +275,9 @@ func indexTransactionsForTesting(db ethdb.Database, from uint64, to uint64, inte // signal received. func unindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) { // short circuit for invalid range + if offset := db.AncientOffSet(); offset > from { + from = offset + } if from >= to { return } diff --git a/core/rawdb/database.go b/core/rawdb/database.go index 3342b36f36..21f90f5654 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -20,7 +20,6 @@ import ( "bytes" "errors" "fmt" - "math/big" "os" "sync/atomic" "time" @@ -159,33 +158,6 @@ func NewDatabase(db ethdb.KeyValueStore) ethdb.Database { } } -func ReadOffSetOfCurrentAncientFreezer(db ethdb.KeyValueReader) uint64 { - offset, _ := db.Get(offSetOfCurrentAncientFreezer) - if offset == nil { - return 0 - } - return new(big.Int).SetBytes(offset).Uint64() -} - -func ReadOffSetOfLastAncientFreezer(db ethdb.KeyValueReader) uint64 { - offset, _ := db.Get(offSetOfLastAncientFreezer) - if offset == nil { - return 0 - } - return new(big.Int).SetBytes(offset).Uint64() -} - -func WriteOffSetOfCurrentAncientFreezer(db ethdb.KeyValueWriter, offset uint64) { - if err := db.Put(offSetOfCurrentAncientFreezer, new(big.Int).SetUint64(offset).Bytes()); err != nil { - log.Crit("Failed to store offSetOfAncientFreezer", "err", err) - } -} -func WriteOffSetOfLastAncientFreezer(db ethdb.KeyValueWriter, offset uint64) { - if err := db.Put(offSetOfLastAncientFreezer, new(big.Int).SetUint64(offset).Bytes()); err != nil { - log.Crit("Failed to store offSetOfAncientFreezer", "err", err) - } -} - // NewFreezerDb only create a freezer without statedb. func NewFreezerDb(db ethdb.KeyValueStore, frz, namespace string, readonly bool, newOffSet uint64) (*freezer, error) { // Create the idle freezer instance, this operation should be atomic to avoid mismatch between offset and acientDB. @@ -201,7 +173,29 @@ func NewFreezerDb(db ethdb.KeyValueStore, frz, namespace string, readonly bool, // NewDatabaseWithFreezer creates a high level database on top of a given key- // value data store with a freezer moving immutable chain segments into cold // storage. -func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace string, readonly, disableFreeze, isLastOffset bool) (ethdb.Database, error) { +func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace string, readonly, disableFreeze, isLastOffset, pruneAncientData bool) (ethdb.Database, error) { + if pruneAncientData && !disableFreeze && !readonly { + frdb, err := newPrunedFreezer(freezer, db) + if err != nil { + return nil, err + } + + go frdb.freeze() + WriteAncientType(db, PruneFreezerType) + return &freezerdb{ + KeyValueStore: db, + AncientStore: frdb, + }, nil + } + + if pruneAncientData { + log.Error("pruneancient does not take effect because disableFreeze or readonly is set") + } + + if ReadAncientType(db) == PruneFreezerType { + log.Warn("the database was previously opened with pruneancient, startup may fail; add the --pruneancient flag to resolve") + } + // Create the idle freezer instance frdb, err := newFreezer(freezer, namespace, readonly) if err != nil { @@ -289,7 +283,8 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace st // feezer.
} } - + // no prune ancinet start success + WriteAncientType(db, EntireFreezerType) // Freezer is consistent with the key-value database, permit combining the two if !disableFreeze && !frdb.readonly { go frdb.freeze(db) @@ -325,12 +320,12 @@ func NewLevelDBDatabase(file string, cache int, handles int, namespace string, r // NewLevelDBDatabaseWithFreezer creates a persistent key-value database with a // freezer moving immutable chain segments into cold storage. -func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, freezer string, namespace string, readonly, disableFreeze, isLastOffset bool) (ethdb.Database, error) { +func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, freezer string, namespace string, readonly, disableFreeze, isLastOffset, pruneAncientData bool) (ethdb.Database, error) { kvdb, err := leveldb.New(file, cache, handles, namespace, readonly) if err != nil { return nil, err } - frdb, err := NewDatabaseWithFreezer(kvdb, freezer, namespace, readonly, disableFreeze, isLastOffset) + frdb, err := NewDatabaseWithFreezer(kvdb, freezer, namespace, readonly, disableFreeze, isLastOffset, pruneAncientData) if err != nil { kvdb.Close() return nil, err diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go index 92dfc9604a..0dbdf94bae 100644 --- a/core/rawdb/freezer.go +++ b/core/rawdb/freezer.go @@ -310,9 +310,7 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) { } select { case <-time.NewTimer(freezerRecheckInterval).C: - backoff = false case triggered = <-f.trigger: - backoff = false case <-f.quit: return } @@ -397,83 +395,10 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) { if err := f.Sync(); err != nil { log.Crit("Failed to flush frozen tables", "err", err) } - // Wipe out all data from the active database - batch := db.NewBatch() - for i := 0; i < len(ancients); i++ { - // Always keep the genesis block in active database - if first+uint64(i) != 0 { - DeleteBlockWithoutNumber(batch, ancients[i], first+uint64(i)) - DeleteCanonicalHash(batch, first+uint64(i)) - } - } - if err := batch.Write(); err != nil { - log.Crit("Failed to delete frozen canonical blocks", "err", err) - } - batch.Reset() - - // Wipe out side chains also and track dangling side chians - var dangling []common.Hash - for number := first; number < f.frozen; number++ { - // Always keep the genesis block in active database - if number != 0 { - dangling = ReadAllHashes(db, number) - for _, hash := range dangling { - log.Trace("Deleting side chain", "number", number, "hash", hash) - DeleteBlock(batch, hash, number) - } - } - } - if err := batch.Write(); err != nil { - log.Crit("Failed to delete frozen side blocks", "err", err) - } - batch.Reset() - - // Step into the future and delete and dangling side chains - if f.frozen > 0 { - tip := f.frozen - for len(dangling) > 0 { - drop := make(map[common.Hash]struct{}) - for _, hash := range dangling { - log.Debug("Dangling parent from freezer", "number", tip-1, "hash", hash) - drop[hash] = struct{}{} - } - children := ReadAllHashes(db, tip) - for i := 0; i < len(children); i++ { - // Dig up the child and ensure it's dangling - child := ReadHeader(nfdb, children[i], tip) - if child == nil { - log.Error("Missing dangling header", "number", tip, "hash", children[i]) - continue - } - if _, ok := drop[child.ParentHash]; !ok { - children = append(children[:i], children[i+1:]...) 
- i-- - continue - } - // Delete all block data associated with the child - log.Debug("Deleting dangling block", "number", tip, "hash", children[i], "parent", child.ParentHash) - DeleteBlock(batch, children[i], tip) - } - dangling = children - tip++ - } - if err := batch.Write(); err != nil { - log.Crit("Failed to delete dangling side blocks", "err", err) - } - } - // Log something friendly for the user - context := []interface{}{ - "blocks", f.frozen - first, "elapsed", common.PrettyDuration(time.Since(start)), "number", f.frozen - 1, - } - if n := len(ancients); n > 0 { - context = append(context, []interface{}{"hash", ancients[n-1]}...) - } - log.Info("Deep froze chain segment", context...) - // Avoid database thrashing with tiny writes - if f.frozen-first < freezerBatchLimit { - backoff = true - } + // Batch of blocks have been frozen, flush them before wiping from leveldb + backoff = f.frozen-first >= freezerBatchLimit + gcKvStore(db, ancients, first, f.frozen, start) } } @@ -494,3 +419,81 @@ func (f *freezer) repair() error { atomic.StoreUint64(&f.frozen, min) return nil } + +// delete leveldb data that save to ancientdb, split from func freeze +func gcKvStore(db ethdb.KeyValueStore, ancients []common.Hash, first uint64, frozen uint64, start time.Time) { + // Wipe out all data from the active database + batch := db.NewBatch() + for i := 0; i < len(ancients); i++ { + // Always keep the genesis block in active database + if blockNumber := first + uint64(i); blockNumber != 0 { + DeleteBlockWithoutNumber(batch, ancients[i], blockNumber) + DeleteCanonicalHash(batch, blockNumber) + } + } + if err := batch.Write(); err != nil { + log.Crit("Failed to delete frozen canonical blocks", "err", err) + } + batch.Reset() + + // Wipe out side chains also and track dangling side chians + var dangling []common.Hash + for number := first; number < frozen; number++ { + // Always keep the genesis block in active database + if number != 0 { + dangling = ReadAllHashes(db, number) + for _, hash := range dangling { + log.Trace("Deleting side chain", "number", number, "hash", hash) + DeleteBlock(batch, hash, number) + } + } + } + if err := batch.Write(); err != nil { + log.Crit("Failed to delete frozen side blocks", "err", err) + } + batch.Reset() + + // Step into the future and delete and dangling side chains + if frozen > 0 { + tip := frozen + nfdb := &nofreezedb{KeyValueStore: db} + for len(dangling) > 0 { + drop := make(map[common.Hash]struct{}) + for _, hash := range dangling { + log.Debug("Dangling parent from freezer", "number", tip-1, "hash", hash) + drop[hash] = struct{}{} + } + children := ReadAllHashes(db, tip) + for i := 0; i < len(children); i++ { + // Dig up the child and ensure it's dangling + child := ReadHeader(nfdb, children[i], tip) + if child == nil { + log.Error("Missing dangling header", "number", tip, "hash", children[i]) + continue + } + if _, ok := drop[child.ParentHash]; !ok { + children = append(children[:i], children[i+1:]...) 
+ i-- + continue + } + // Delete all block data associated with the child + log.Debug("Deleting dangling block", "number", tip, "hash", children[i], "parent", child.ParentHash) + DeleteBlock(batch, children[i], tip) + } + dangling = children + tip++ + } + if err := batch.Write(); err != nil { + log.Crit("Failed to delete dangling side blocks", "err", err) + } + } + + // Log something friendly for the user + context := []interface{}{ + "blocks", frozen - first, "elapsed", common.PrettyDuration(time.Since(start)), "number", frozen - 1, + } + if n := len(ancients); n > 0 { + context = append(context, []interface{}{"hash", ancients[n-1]}...) + } + log.Info("Deep froze chain segment", context...) +} diff --git a/core/rawdb/prunedfreezer.go b/core/rawdb/prunedfreezer.go new file mode 100644 index 0000000000..b82993e6ec --- /dev/null +++ b/core/rawdb/prunedfreezer.go @@ -0,0 +1,285 @@ +package rawdb + +import ( + "math" + "os" + "path/filepath" + "sync" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + "github.com/prometheus/tsdb/fileutil" +) + +// prunedfreezer does not contain any ancient data, it only records 'frozen', the next block number to be recycled, in the kvstore. +type prunedfreezer struct { + db ethdb.KeyValueStore // Meta database + // WARNING: The `frozen` field is accessed atomically. On 32 bit platforms, only + // 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned, + // so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG). + frozen uint64 // BlockNumber of next frozen block + threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests) + + instanceLock fileutil.Releaser // File-system lock to prevent double opens + quit chan struct{} + closeOnce sync.Once +} + +// newPrunedFreezer creates a chain freezer that deletes ancient data once it is old enough, without keeping a copy. +func newPrunedFreezer(datadir string, db ethdb.KeyValueStore) (*prunedfreezer, error) { + if info, err := os.Lstat(datadir); !os.IsNotExist(err) { + if info.Mode()&os.ModeSymlink != 0 { + log.Warn("Symbolic link ancient database is not supported", "path", datadir) + return nil, errSymlinkDatadir + } + } + + lock, _, err := fileutil.Flock(filepath.Join(datadir, "../NODATA_ANCIENT_FLOCK")) + if err != nil { + return nil, err + } + + freezer := &prunedfreezer{ + db: db, + threshold: params.FullImmutabilityThreshold, + instanceLock: lock, + quit: make(chan struct{}), + } + + if err := freezer.repair(datadir); err != nil { + return nil, err + } + + log.Info("Opened ancientdb with nodata mode", "database", datadir, "frozen", freezer.frozen) + return freezer, nil + } + +// repair initializes the frozen counter, staying compatible with the on-disk ancientdb and the prune-block tool.
+func (f *prunedfreezer) repair(datadir string) error { + // compatible prune-block-tool + offset := ReadOffSetOfCurrentAncientFreezer(f.db) + + // compatible freezer + min := uint64(math.MaxUint64) + for name, disableSnappy := range FreezerNoSnappy { + table, err := NewFreezerTable(datadir, name, disableSnappy) + if err != nil { + return err + } + items := atomic.LoadUint64(&table.items) + if min > items { + min = items + } + table.Close() + } + offset += min + + if frozen := ReadFrozenOfAncientFreezer(f.db); frozen > offset { + offset = frozen + } + + atomic.StoreUint64(&f.frozen, offset) + if err := f.Sync(); err != nil { + return err + } + return nil +} + +// Close terminates the chain prunedfreezer. +func (f *prunedfreezer) Close() error { + var err error + f.closeOnce.Do(func() { + close(f.quit) + f.Sync() + err = f.instanceLock.Release() + }) + return err +} + +// HasAncient returns an indicator whether the specified ancient data exists; the pruned freezer always reports false. +func (f *prunedfreezer) HasAncient(kind string, number uint64) (bool, error) { + return false, nil +} + +// Ancient retrieves an ancient binary blob from the prunedfreezer; it always returns nil data for in-range requests. +func (f *prunedfreezer) Ancient(kind string, number uint64) ([]byte, error) { + if _, ok := FreezerNoSnappy[kind]; ok { + if number >= atomic.LoadUint64(&f.frozen) { + return nil, errOutOfBounds + } + return nil, nil + } + return nil, errUnknownTable +} + +// Ancients returns the next block number to be frozen (i.e. the frozen item count). +func (f *prunedfreezer) Ancients() (uint64, error) { + return atomic.LoadUint64(&f.frozen), nil +} + +// ItemAmountInAncient returns the actual number of items in the current ancientDB; the pruned freezer always returns 0. +func (f *prunedfreezer) ItemAmountInAncient() (uint64, error) { + return 0, nil +} + +// AncientOffSet returns the offset of current ancientDB, offset == frozen. +func (f *prunedfreezer) AncientOffSet() uint64 { + return atomic.LoadUint64(&f.frozen) +} + +// AncientSize returns the ancient size of the specified category; the pruned freezer always returns 0. +func (f *prunedfreezer) AncientSize(kind string) (uint64, error) { + if _, ok := FreezerNoSnappy[kind]; ok { + return 0, nil + } + return 0, errUnknownTable +} + +// AppendAncient only advances the frozen counter; the pruned freezer stores no data. +// +// Notably, this function is lock free but kind of thread-safe. All out-of-order +// injection will be rejected. But if two injections with same number happen at +// the same time, we can get into the trouble. +func (f *prunedfreezer) AppendAncient(number uint64, hash, header, body, receipts, td []byte) (err error) { + if atomic.LoadUint64(&f.frozen) != number { + return errOutOrderInsertion + } + atomic.AddUint64(&f.frozen, 1) + return nil +} + +// TruncateAncients discards any recent data above the provided threshold number; it always succeeds. +func (f *prunedfreezer) TruncateAncients(items uint64) error { + if atomic.LoadUint64(&f.frozen) <= items { + return nil + } + atomic.StoreUint64(&f.frozen, items) + WriteFrozenOfAncientFreezer(f.db, atomic.LoadUint64(&f.frozen)) + return nil +} + +// Sync flushes meta data tables to disk. +func (f *prunedfreezer) Sync() error { + WriteFrozenOfAncientFreezer(f.db, atomic.LoadUint64(&f.frozen)) + return nil +} + +// freeze is a background thread that periodically checks the blockchain for any +// import progress and deletes ancient data from the fast database once it falls +// outside the retention window. +// +// This functionality is deliberately broken off from block importing to avoid +// incurring additional data shuffling delays on block propagation.
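// Editor's note (illustrative sketch, not part of the patch): each pass of the loop below advances
// the frozen counter by at most freezerBatchLimit blocks, and never past either the immutability
// window or the safe-point state minus StableStateThreshold. With example numbers (mainnet
// FullImmutabilityThreshold of 90_000 assumed):
//
//	head := uint64(1_000_000)                        // current full block
//	safePoint := uint64(905_000)                     // ReadSafePointBlockNumber
//	limit := head - params.FullImmutabilityThreshold // 910_000
//	if s := safePoint - params.StableStateThreshold; limit > s {
//		limit = s // 904_872: the safe point is the binding constraint here
//	}
//	// f.frozen then advances towards limit, at most freezerBatchLimit blocks per iteration
//
// The concrete numbers are only for illustration.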
+func (f *prunedfreezer) freeze() { + nfdb := &nofreezedb{KeyValueStore: f.db} + + var backoff bool + for { + select { + case <-f.quit: + log.Info("Freezer shutting down") + return + default: + } + if backoff { + select { + case <-time.NewTimer(freezerRecheckInterval).C: + case <-f.quit: + return + } + } + + // Retrieve the freezing threshold. + hash := ReadHeadBlockHash(nfdb) + if hash == (common.Hash{}) { + log.Debug("Current full block hash unavailable") // new chain, empty database + backoff = true + continue + } + number := ReadHeaderNumber(nfdb, hash) + threshold := atomic.LoadUint64(&f.threshold) + + switch { + case number == nil: + log.Error("Current full block number unavailable", "hash", hash) + backoff = true + continue + + case *number < threshold: + log.Debug("Current full block not old enough", "number", *number, "hash", hash, "delay", threshold) + backoff = true + continue + + case *number-threshold <= f.frozen: + log.Debug("Ancient blocks frozen already", "number", *number, "hash", hash, "frozen", f.frozen) + backoff = true + continue + } + head := ReadHeader(nfdb, hash, *number) + if head == nil { + log.Error("Stable state block unavailable", "number", *number, "hash", hash) + backoff = true + continue + } + + stableStateNumber := ReadSafePointBlockNumber(nfdb) + switch { + case stableStateNumber < params.StableStateThreshold: + log.Debug("Stable state block not old enough", "number", stableStateNumber) + backoff = true + continue + + case stableStateNumber > *number: + log.Warn("Stable state block bigger than current full block", "stableStateNumber", stableStateNumber, "number", *number) + backoff = true + continue + } + stableStateNumber -= params.StableStateThreshold + + // Seems we have data ready to be frozen, process in usable batches + limit := *number - threshold + if limit > stableStateNumber { + limit = stableStateNumber + } + + if limit < f.frozen { + log.Debug("Stable state block has already been pruned", "limit", limit, "frozen", f.frozen) + backoff = true + continue + } + + if limit-f.frozen > freezerBatchLimit { + limit = f.frozen + freezerBatchLimit + } + var ( + start = time.Now() + first = f.frozen + ancients = make([]common.Hash, 0, limit-f.frozen) + ) + for f.frozen <= limit { + // Retrieves all the components of the canonical block + hash := ReadCanonicalHash(nfdb, f.frozen) + if hash == (common.Hash{}) { + log.Error("Canonical hash missing, can't freeze", "number", f.frozen) + } + log.Trace("Deep froze ancient block", "number", f.frozen, "hash", hash) + // Only advance the frozen counter; the pruned freezer stores no data + if err := f.AppendAncient(f.frozen, nil, nil, nil, nil, nil); err != nil { + log.Error("Failed to append ancient block", "number", f.frozen, "hash", hash, "err", err) + break + } + if hash != (common.Hash{}) { + ancients = append(ancients, hash) + } + } + // Batch of blocks have been frozen, flush them before wiping from leveldb + if err := f.Sync(); err != nil { + log.Crit("Failed to flush frozen tables", "err", err) + } + backoff = f.frozen-first >= freezerBatchLimit + gcKvStore(f.db, ancients, first, f.frozen, start) + } +} diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index ece5006c39..77468f43e9 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -75,6 +75,15 @@ var ( //offSet of the ancientDB before updated version. offSetOfLastAncientFreezer = []byte("offSetOfLastAncientFreezer") + //frozenOfAncientDBKey tracks the next frozen block number of the ancientDB.
+ frozenOfAncientDBKey = []byte("FrozenOfAncientDB") + + //LastSafePointBlockKey tracks the last block number whose state has been written to disk + LastSafePointBlockKey = []byte("LastSafePointBlockNumber") + + //pruneAncientKey flags whether the ancient data has been pruned + pruneAncientKey = []byte("PruneAncientFlag") + // badBlockKey tracks the list of bad blocks seen by local badBlockKey = []byte("InvalidBlock") diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go index 5b070f3afa..b222bb97bb 100644 --- a/core/state/pruner/pruner.go +++ b/core/state/pruner/pruner.go @@ -258,7 +258,7 @@ func prune(snaptree *snapshot.Tree, root common.Hash, maindb ethdb.Database, sta func (p *BlockPruner) backUpOldDb(name string, cache, handles int, namespace string, readonly, interrupt bool) error { // Open old db wrapper. - chainDb, err := p.node.OpenDatabaseWithFreezer(name, cache, handles, p.oldAncientPath, namespace, readonly, true, interrupt) + chainDb, err := p.node.OpenDatabaseWithFreezer(name, cache, handles, p.oldAncientPath, namespace, readonly, true, interrupt, false) if err != nil { log.Error("Failed to open ancient database", "err=", err) return err } diff --git a/eth/backend.go b/eth/backend.go index 3f782ff6a8..b9c9f9a2cb 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -130,7 +130,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { // Assemble the Ethereum object chainDb, err := stack.OpenAndMergeDatabase("chaindata", config.DatabaseCache, config.DatabaseHandles, - config.DatabaseFreezer, config.DatabaseDiff, "eth/db/chaindata/", false, config.PersistDiff) + config.DatabaseFreezer, config.DatabaseDiff, "eth/db/chaindata/", false, config.PersistDiff, config.PruneAncientData) if err != nil { return nil, err } diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 09baad1e1c..e25b55186e 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -167,6 +167,7 @@ type Config struct { DatabaseDiff string PersistDiff bool DiffBlock uint64 + PruneAncientData bool TrieCleanCache int TrieCleanCacheJournal string `toml:",omitempty"` // Disk journal directory for trie cache to survive node restarts diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go index f192a1aace..b3af010714 100644 --- a/eth/ethconfig/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -52,6 +52,7 @@ func (c Config) MarshalTOML() (interface{}, error) { Preimages bool PersistDiff bool DiffBlock uint64 `toml:",omitempty"` + PruneAncientData bool Miner miner.Config Ethash ethash.Config TxPool core.TxPoolConfig @@ -100,6 +101,7 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.Preimages = c.Preimages enc.PersistDiff = c.PersistDiff enc.DiffBlock = c.DiffBlock + enc.PruneAncientData = c.PruneAncientData enc.Miner = c.Miner enc.Ethash = c.Ethash enc.TxPool = c.TxPool @@ -145,6 +147,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { DatabaseDiff *string PersistDiff *bool DiffBlock *uint64 `toml:",omitempty"` + PruneAncientData *bool TrieCleanCache *int TrieCleanCacheJournal *string `toml:",omitempty"` TrieCleanCacheRejournal *time.Duration `toml:",omitempty"` @@ -248,6 +251,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.DiffBlock != nil { c.DiffBlock = *dec.DiffBlock } + if dec.PruneAncientData != nil { + c.PruneAncientData = *dec.PruneAncientData + } if dec.TrieCleanCache != nil { c.TrieCleanCache = *dec.TrieCleanCache } diff --git a/eth/handler.go b/eth/handler.go index cbc6eca809..f7c0e3c5dc
100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -27,6 +27,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/forkid" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/fetcher" @@ -171,6 +172,9 @@ func newHandler(config *handlerConfig) (*handler, error) { // In these cases however it's safe to reenable fast sync. fullBlock, fastBlock := h.chain.CurrentBlock(), h.chain.CurrentFastBlock() if fullBlock.NumberU64() == 0 && fastBlock.NumberU64() > 0 { + if rawdb.ReadAncientType(h.database) == rawdb.PruneFreezerType { + log.Crit("Fast sync has not finished, cannot enable pruneancient mode") + } h.fastSync = uint32(1) log.Warn("Switch sync mode from full sync to fast sync") } diff --git a/eth/sync.go b/eth/sync.go index 2256c7cb99..436200bb51 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -284,11 +284,16 @@ func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) { // fast sync pivot, check if we should reenable if pivot := rawdb.ReadLastPivotNumber(cs.handler.database); pivot != nil { if head := cs.handler.chain.CurrentBlock(); head.NumberU64() < *pivot { + if rawdb.ReadAncientType(cs.handler.database) == rawdb.PruneFreezerType { + log.Crit("Chain head was rewound to before the fast sync pivot, cannot enable pruneancient mode", "current block number", head.NumberU64(), "pivot", *pivot) + } block := cs.handler.chain.CurrentFastBlock() td := cs.handler.chain.GetTdByHash(block.Hash()) + log.Warn("Switching from full sync to fast sync", "current block number", head.NumberU64(), "pivot", *pivot) return downloader.FastSync, td } } + // Nope, we're really full syncing head := cs.handler.chain.CurrentBlock() td := cs.handler.chain.GetTd(head.Hash(), head.NumberU64()) diff --git a/node/node.go b/node/node.go index b6e1246b3d..df0693127f 100644 --- a/node/node.go +++ b/node/node.go @@ -582,12 +582,12 @@ func (n *Node) OpenDatabase(name string, cache, handles int, namespace string, r return db, err } -func (n *Node) OpenAndMergeDatabase(name string, cache, handles int, freezer, diff, namespace string, readonly, persistDiff bool) (ethdb.Database, error) { +func (n *Node) OpenAndMergeDatabase(name string, cache, handles int, freezer, diff, namespace string, readonly, persistDiff, pruneAncientData bool) (ethdb.Database, error) { chainDataHandles := handles if persistDiff { chainDataHandles = handles * chainDataHandlesPercentage / 100 } - chainDB, err := n.OpenDatabaseWithFreezer(name, cache, chainDataHandles, freezer, namespace, readonly, false, false) + chainDB, err := n.OpenDatabaseWithFreezer(name, cache, chainDataHandles, freezer, namespace, readonly, false, false, pruneAncientData) if err != nil { return nil, err } @@ -607,7 +607,7 @@ func (n *Node) OpenAndMergeDatabase(name string, cache, handles int, freezer, di // also attaching a chain freezer to it that moves ancient chain data from the // database to immutable append-only files. If the node is an ephemeral one, a // memory database is returned.
-func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, freezer, namespace string, readonly, disableFreeze, isLastOffset bool) (ethdb.Database, error) { +func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, freezer, namespace string, readonly, disableFreeze, isLastOffset, pruneAncientData bool) (ethdb.Database, error) { n.lock.Lock() defer n.lock.Unlock() if n.state == closedState { @@ -626,7 +626,7 @@ func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, freezer, case !filepath.IsAbs(freezer): freezer = n.ResolvePath(freezer) } - db, err = rawdb.NewLevelDBDatabaseWithFreezer(root, cache, handles, freezer, namespace, readonly, disableFreeze, isLastOffset) + db, err = rawdb.NewLevelDBDatabaseWithFreezer(root, cache, handles, freezer, namespace, readonly, disableFreeze, isLastOffset, pruneAncientData) } if err == nil { diff --git a/params/network_params.go b/params/network_params.go index 9311b5e2d5..cc322e86f4 100644 --- a/params/network_params.go +++ b/params/network_params.go @@ -53,6 +53,9 @@ const ( // CheckpointProcessConfirmations is the number before a checkpoint is generated CheckpointProcessConfirmations = 256 + // StableStateThreshold is the number of blocks reserved below the last block whose state was saved to disk; ancient data within this margin is never deleted + StableStateThreshold uint64 = 128 + // FullImmutabilityThreshold is the number of blocks after which a chain segment is // considered immutable (i.e. soft finality). It is used by the downloader as a // hard limit against deep ancestors, by the blockchain against deep reorgs, by