Commit e05e1da
Default to not keeping all blob info.
blxdyx committed Nov 28, 2024
1 parent 6afc003 commit e05e1da
Showing 9 changed files with 23 additions and 28 deletions.
2 changes: 1 addition & 1 deletion cmd/integration/commands/stages.go
@@ -1569,5 +1569,5 @@ func initConsensusEngine(ctx context.Context, cc *chain2.Config, dir string, db
 		consensusConfig = &config.Ethash
 	}
 	return ethconsensusconfig.CreateConsensusEngine(ctx, &nodecfg.Config{Dirs: datadir.New(dir)}, cc, consensusConfig, config.Miner.Notify, config.Miner.Noverify,
-		heimdallClient, config.WithoutHeimdall, config.BlobPrune, blockReader, db.ReadOnly(), logger, nil, nil), heimdallClient
+		heimdallClient, config.WithoutHeimdall, config.DisableBlobPrune, blockReader, db.ReadOnly(), logger, nil, nil), heimdallClient
 }
2 changes: 1 addition & 1 deletion eth/backend.go
@@ -588,7 +588,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
 		flags.Milestone = config.WithHeimdallMilestones
 	}

-	backend.engine = ethconsensusconfig.CreateConsensusEngine(ctx, stack.Config(), chainConfig, consensusConfig, config.Miner.Notify, config.Miner.Noverify, heimdallClient, config.WithoutHeimdall, config.BlobPrune, blockReader, false /* readonly */, logger, polygonBridge, heimdallService)
+	backend.engine = ethconsensusconfig.CreateConsensusEngine(ctx, stack.Config(), chainConfig, consensusConfig, config.Miner.Notify, config.Miner.Noverify, heimdallClient, config.WithoutHeimdall, config.DisableBlobPrune, blockReader, false /* readonly */, logger, polygonBridge, heimdallService)

 	inMemoryExecution := func(txc wrap.TxContainer, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody,
 		notifications *shards.Notifications) error {
2 changes: 1 addition & 1 deletion eth/ethconfig/config.go
@@ -209,7 +209,7 @@ type Config struct {
 	Aura   chain.AuRaConfig
 	Parlia chain.ParliaConfig

-	BlobPrune bool // Prune Bsc BlobSidecars
+	DisableBlobPrune bool // Disable prune Bsc BlobSidecars

 	// Transaction pool options
 	DeprecatedTxPool DeprecatedTxPoolConfig
8 changes: 4 additions & 4 deletions eth/ethconsensusconfig/config.go
@@ -49,7 +49,7 @@ import (
 )

 func CreateConsensusEngine(ctx context.Context, nodeConfig *nodecfg.Config, chainConfig *chain.Config, config interface{}, notify []string, noVerify bool,
-	heimdallClient heimdall.HeimdallClient, withoutHeimdall bool, blobPrune bool, blockReader services.FullBlockReader, readonly bool,
+	heimdallClient heimdall.HeimdallClient, withoutHeimdall bool, disableBlobPrune bool, blockReader services.FullBlockReader, readonly bool,
 	logger log.Logger, polygonBridge bridge.Service, heimdallService heimdall.Service,
 ) consensus.Engine {
 	var eng consensus.Engine
@@ -140,9 +140,9 @@ func CreateConsensusEngine(ctx context.Context, nodeConfig *nodecfg.Config, chai
 			panic(err)
 		}
 		var blocksKept uint64
-		blocksKept = math.MaxUint64
-		if blobPrune {
-			blocksKept = params.MinBlocksForBlobRequests
+		blocksKept = params.MinBlocksForBlobRequests
+		if disableBlobPrune {
+			blocksKept = math.MaxUint64
 		}
 		blobStore := blob_storage.NewBlobStore(blobDb, afero.NewBasePathFs(afero.NewOsFs(), nodeConfig.Dirs.DataDir), blocksKept, chainConfig)
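The flag's polarity is inverted here, which flips the default behaviour: previously every blob sidecar was kept (blocksKept = math.MaxUint64) unless pruning was opted into, whereas now the store keeps only params.MinBlocksForBlobRequests blocks of blobs unless pruning is explicitly disabled. A minimal standalone sketch of the new selection logic, mirroring both this hunk and openBlobStore below (the constant's value is illustrative, standing in for params.MinBlocksForBlobRequests; with BSC's 3-second blocks 518400 blocks is roughly 18 days):

package main

import (
	"fmt"
	"math"
)

// Stand-in for params.MinBlocksForBlobRequests; the exact value is
// illustrative here, not taken from the repository.
const minBlocksForBlobRequests uint64 = 518400

// blobRetentionWindow mirrors the selection logic after this change:
// prune to the protocol minimum by default, and keep everything only
// when pruning is explicitly disabled.
func blobRetentionWindow(disableBlobPrune bool) uint64 {
	if disableBlobPrune {
		return math.MaxUint64 // keep all blob sidecars
	}
	return minBlocksForBlobRequests
}

func main() {
	fmt.Println(blobRetentionWindow(false)) // default: 518400
	fmt.Println(blobRetentionWindow(true))  // opt-out: 18446744073709551615
}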
12 changes: 6 additions & 6 deletions turbo/app/snapshots_cmd.go
@@ -1016,7 +1016,7 @@ func doIndicesCommand(cliCtx *cli.Context, dirs datadir.Dirs) error {

 	var bs services.BlobStorage
 	if chainConfig.Parlia != nil {
-		bs = openBlobStore(dirs, chainConfig, true)
+		bs = openBlobStore(dirs, chainConfig, false)
 	}

 	_, _, _, caplinSnaps, br, agg, clean, err := openSnaps(ctx, dirs, chainDB, bs, logger)
@@ -1280,7 +1280,7 @@ func doRetireCommand(cliCtx *cli.Context, dirs datadir.Dirs) error {

 	var bs services.BlobStorage
 	if chainConfig.Parlia != nil {
-		bs = openBlobStore(dirs, chainConfig, blobPrune)
+		bs = openBlobStore(dirs, chainConfig, !blobPrune)
 	}

 	_, _, _, caplinSnaps, br, agg, clean, err := openSnaps(ctx, dirs, db, bs, logger)
@@ -1582,13 +1582,13 @@ func openAgg(ctx context.Context, dirs datadir.Dirs, chainDB kv.RwDB, logger log
 //	return nil
 //}

-func openBlobStore(dirs datadir.Dirs, chainConfig *chain.Config, blobPrune bool) services.BlobStorage {
+func openBlobStore(dirs datadir.Dirs, chainConfig *chain.Config, disableBlobPrune bool) services.BlobStorage {
 	blobDbPath := path.Join(dirs.Blobs, "blob")
 	blobDb := mdbx.MustOpen(blobDbPath)
 	var blobKept uint64
-	blobKept = math.MaxUint64
-	if blobPrune {
-		blobKept = params.MinBlocksForBlobRequests
+	blobKept = params.MinBlocksForBlobRequests
+	if disableBlobPrune {
+		blobKept = math.MaxUint64
 	}
 	blobStore := blob_storage.NewBlobStore(blobDb, afero.NewBasePathFs(afero.NewOsFs(), dirs.Blobs), blobKept, chainConfig)
 	return blobStore
2 changes: 1 addition & 1 deletion turbo/cli/default_flags.go
@@ -46,7 +46,7 @@ var DefaultFlags = []cli.Flag{
 	&PruneDistanceFlag,
 	&PruneBlocksDistanceFlag,
 	&PruneModeFlag,
-	&PruneBscBlobSidecarsFlag,
+	&BscDisableBlobPruningFlag,
 	&BatchSizeFlag,
 	&BodyCacheLimitFlag,
 	&DatabaseVerbosityFlag,
10 changes: 5 additions & 5 deletions turbo/cli/flags.go
@@ -105,9 +105,9 @@ var (
 * tevm - write TEVM translated code to the DB`,
 		Value: "default",
 	}
-	PruneBscBlobSidecarsFlag = cli.BoolFlag{
-		Name:  "prune.blobSidecars",
-		Usage: `enable blob pruning in bsc`,
+	BscDisableBlobPruningFlag = cli.BoolFlag{
+		Name:  "bsc.blobSidecars.no-pruning",
+		Usage: `bsc will keep 18 days blobs in db`,
 		Value: false,
 	}
@@ -312,7 +312,7 @@ func ApplyFlagsForEthConfig(ctx *cli.Context, cfg *ethconfig.Config, logger log.
 		mode.History = prune.Distance(config3.DefaultPruneDistance)
 	}

-	cfg.BlobPrune = ctx.Bool(PruneBscBlobSidecarsFlag.Name)
+	cfg.DisableBlobPrune = ctx.Bool(BscDisableBlobPruningFlag.Name)

 	if err != nil {
 		utils.Fatalf(fmt.Sprintf("error while parsing mode: %v", err))
@@ -442,7 +442,7 @@ func ApplyFlagsForEthConfigCobra(f *pflag.FlagSet, cfg *ethconfig.Config) {
 	}
 	cfg.Prune = mode

-	cfg.BlobPrune = *f.Bool(PruneBscBlobSidecarsFlag.Name, PruneBscBlobSidecarsFlag.Value, PruneBscBlobSidecarsFlag.Usage)
+	cfg.DisableBlobPrune = *f.Bool(BscDisableBlobPruningFlag.Name, BscDisableBlobPruningFlag.Value, BscDisableBlobPruningFlag.Usage)

 	if v := f.String(BatchSizeFlag.Name, BatchSizeFlag.Value, BatchSizeFlag.Usage); v != nil {
 		err := cfg.BatchSize.UnmarshalText([]byte(*v))
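Because the boolean's meaning is inverted, the flag's default Value: false now leaves pruning enabled, and operators who want the old keep-everything behaviour have to pass --bsc.blobSidecars.no-pruning explicitly. A self-contained sketch of the wiring, assuming urfave/cli/v2 (which the flag's field layout suggests; erigon's real flag plumbing is more involved):

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

// Demo of the renamed flag reaching the config: the default false
// keeps pruning on, and the flag is an explicit opt-out.
func main() {
	app := &cli.App{
		Name: "blob-prune-demo",
		Flags: []cli.Flag{
			&cli.BoolFlag{
				Name:  "bsc.blobSidecars.no-pruning",
				Usage: "bsc will keep 18 days blobs in db",
				Value: false, // pruning stays on unless the flag is passed
			},
		},
		Action: func(ctx *cli.Context) error {
			// Mirrors cfg.DisableBlobPrune = ctx.Bool(BscDisableBlobPruningFlag.Name)
			disableBlobPrune := ctx.Bool("bsc.blobSidecars.no-pruning")
			fmt.Println("DisableBlobPrune =", disableBlobPrune)
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}

Run without arguments this prints DisableBlobPrune = false; with --bsc.blobSidecars.no-pruning it prints true, matching the opt-out semantics of the rename.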
5 changes: 0 additions & 5 deletions turbo/snapshotsync/snapshotsync.go
@@ -317,11 +317,6 @@ func WaitForDownloader(ctx context.Context, logPrefix string, dirs datadir.Dirs,
 		}
 	}

-	// Bsc keep all the blob snapshot but Caplin is on the contrary
-	if caplin == NoCaplin {
-		blobs = !blobs
-	}
-
 	// build all download requests
 	for _, p := range preverifiedBlockSnapshots {
 		if caplin == NoCaplin && (strings.Contains(p.Name, "beaconblocks") || strings.Contains(p.Name, "blobsidecars")) && !strings.Contains(p.Name, "bscblobsiders") {
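With the config field now carrying "disable pruning" semantics all the way down from the CLI flag, the BSC-specific inversion of the blobs download toggle above is apparently no longer needed: callers can pass the intended value directly instead of flipping it for non-Caplin chains.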
8 changes: 4 additions & 4 deletions turbo/stages/stageloop.go
@@ -678,7 +678,7 @@ func NewDefaultStages(ctx context.Context,
 	runInTestMode := cfg.ImportMode

 	return stagedsync.DefaultStages(ctx,
-		stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, engine, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling || cfg.BlobPrune, silkworm, cfg.Prune),
+		stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, engine, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling || cfg.DisableBlobPrune, silkworm, cfg.Prune),
 		stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, notifications),
 		stagedsync.StageBorHeimdallCfg(db, snapDb, stagedsync.MiningState{}, *controlServer.ChainConfig, heimdallClient, blockReader, controlServer.Hd, controlServer.Penalize, recents, signatures, cfg.WithHeimdallWaypointRecording, nil),
 		stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter),
@@ -721,7 +721,7 @@ func NewPipelineStages(ctx context.Context,

 	if len(cfg.Sync.UploadLocation) == 0 {
 		return stagedsync.PipelineStages(ctx,
-			stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, engine, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling || cfg.BlobPrune, silkworm, cfg.Prune),
+			stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, engine, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling || cfg.DisableBlobPrune, silkworm, cfg.Prune),
 			stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter),
 			stagedsync.StageSendersCfg(db, controlServer.ChainConfig, cfg.Sync, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd),
 			stagedsync.StageExecuteBlocksCfg(db, cfg.Prune, cfg.BatchSize, controlServer.ChainConfig, controlServer.Engine, &vm.Config{}, notifications, cfg.StateStream, false, false, dirs, blockReader, controlServer.Hd, cfg.Genesis, cfg.Sync, SilkwormForExecutionStage(silkworm, cfg)),
@@ -730,7 +730,7 @@
 	}

 	return stagedsync.UploaderPipelineStages(ctx,
-		stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, engine, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling || cfg.BlobPrune, silkworm, cfg.Prune),
+		stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, engine, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling || cfg.DisableBlobPrune, silkworm, cfg.Prune),
 		stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, notifications),
 		stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter),
 		stagedsync.StageSendersCfg(db, controlServer.ChainConfig, cfg.Sync, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd),
@@ -786,7 +786,7 @@ func NewPolygonSyncStages(
 			consensusEngine,
 			agg,
 			config.InternalCL && config.CaplinConfig.Backfilling,
-			config.CaplinConfig.BlobBackfilling || config.BlobPrune,
+			config.CaplinConfig.BlobBackfilling || config.DisableBlobPrune,
 			silkworm,
 			config.Prune,
 		),
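Note the pattern repeated across all four stage constructors: the blobs argument to StageSnapshotsCfg becomes cfg.CaplinConfig.BlobBackfilling || cfg.DisableBlobPrune, i.e. blob snapshots are presumably retained and downloaded when either Caplin blob backfilling is enabled or BSC blob pruning has been switched off.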
