diff --git a/.github/workflows/qa-clean-exit-block-downloading.yml b/.github/workflows/qa-clean-exit-block-downloading.yml
index 78016ee6237..5ef2c7ddfec 100644
--- a/.github/workflows/qa-clean-exit-block-downloading.yml
+++ b/.github/workflows/qa-clean-exit-block-downloading.yml
@@ -1,6 +1,9 @@
 name: QA - Clean exit (block downloading)

 on:
+#  push:
+#    branches:
+#      - 'release/3.*'
 #  schedule:
 #    - cron: '0 8 * * 1-6'  # Run every day at 08:00 AM UTC except Sunday
   workflow_dispatch:     # Run manually
diff --git a/.github/workflows/qa-rpc-integration-tests.yml b/.github/workflows/qa-rpc-integration-tests.yml
index 5d7501351f6..769e91b9cf8 100644
--- a/.github/workflows/qa-rpc-integration-tests.yml
+++ b/.github/workflows/qa-rpc-integration-tests.yml
@@ -5,9 +5,11 @@ on:
 #  push:
 #    branches:
 #      - main
+#      - 'release/3.*'
 #  pull_request:
 #    branches:
 #      - main
+#      - 'release/3.*'
 #    types:
 #      - opened
 #      - reopened
diff --git a/.github/workflows/qa-rpc-performance-tests.yml b/.github/workflows/qa-rpc-performance-tests.yml
index 491b2a318cd..fdf35bffee6 100644
--- a/.github/workflows/qa-rpc-performance-tests.yml
+++ b/.github/workflows/qa-rpc-performance-tests.yml
@@ -1,6 +1,9 @@
 name: QA - RPC Performance Tests

 on:
+  push:
+    branches:
+      - 'release/3.*'
   workflow_dispatch:
   schedule:
     - cron: '0 0 * * 0'  # Run on Sunday at 00:00 AM UTC
diff --git a/.github/workflows/qa-snap-download.yml b/.github/workflows/qa-snap-download.yml
index 2e22ea82b51..d685d9d713a 100644
--- a/.github/workflows/qa-snap-download.yml
+++ b/.github/workflows/qa-snap-download.yml
@@ -1,6 +1,9 @@
 name: QA - Snapshot Download

 on:
+#  push:
+#    branches:
+#      - 'release/3.*'
 #  schedule:
 #    - cron: '0 20 * * 1-6'  # Run every night at 20:00 (08:00 PM) UTC except Sunday
   workflow_dispatch:     # Run manually
diff --git a/.github/workflows/qa-sync-from-scratch-minimal-node.yml b/.github/workflows/qa-sync-from-scratch-minimal-node.yml
index ec18113d076..5c373f16691 100644
--- a/.github/workflows/qa-sync-from-scratch-minimal-node.yml
+++ b/.github/workflows/qa-sync-from-scratch-minimal-node.yml
@@ -1,6 +1,9 @@
 name: QA - Sync from scratch (minimal node)

 on:
+#  push:
+#    branches:
+#      - 'release/3.*'
 #  schedule:
 #    - cron: '0 0 * * *'  # Run every night at 00:00 AM UTC
   workflow_dispatch:     # Run manually
diff --git a/.github/workflows/qa-tip-tracking-gnosis.yml b/.github/workflows/qa-tip-tracking-gnosis.yml
index c8aa2f929bf..86519396bd5 100644
--- a/.github/workflows/qa-tip-tracking-gnosis.yml
+++ b/.github/workflows/qa-tip-tracking-gnosis.yml
@@ -1,6 +1,9 @@
 name: QA - Tip tracking (Gnosis)

 on:
+#  push:
+#    branches:
+#      - 'release/3.*'
 #  schedule:
 #    - cron: '0 0 * * 1-6'  # Run every night at 00:00 AM UTC except Sunday
   workflow_dispatch:     # Run manually
diff --git a/.github/workflows/qa-tip-tracking-polygon.yml b/.github/workflows/qa-tip-tracking-polygon.yml
index f9e7cded67c..90804298bec 100644
--- a/.github/workflows/qa-tip-tracking-polygon.yml
+++ b/.github/workflows/qa-tip-tracking-polygon.yml
@@ -1,6 +1,9 @@
 name: QA - Tip tracking (Polygon)

 on:
+#  push:
+#    branches:
+#      - 'release/3.*'
 #  schedule:
 #    - cron: '0 0 * * 1-6'  # Run every night at 00:00 AM UTC except Sunday
   workflow_dispatch:     # Run manually
diff --git a/.github/workflows/qa-tip-tracking.yml b/.github/workflows/qa-tip-tracking.yml
index 4e368368e45..15c22d5f372 100644
--- a/.github/workflows/qa-tip-tracking.yml
+++ b/.github/workflows/qa-tip-tracking.yml
@@ -1,6 +1,9 @@
 name: QA - Tip tracking

 on:
+#  push:
+#    branches:
+#      - 'release/3.*'
 #  schedule:
 #    - cron: '0 20 * * 1-6'  # Run every night at 08:00 PM UTC except Sunday
   workflow_dispatch:     # Run manually
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 3b4238ca0bf..2663d18d35d 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -1,5 +1,5 @@
 name: Release
-run-name: Build release ${{ inputs.release_version}} from branch ${{ inputs.checkout_ref }} by @${{ github.actor }}
+run-name: Build release ${{ inputs.release_version}} from branch ${{ inputs.checkout_ref }}, Skip tests=${{ inputs.skip_tests }}

 env:
   APPLICATION: "bsc-erigon"
@@ -37,6 +37,11 @@ on:
       type: boolean
       default: false
       description: 'publish_latest_tag: when set then docker image with tag :latest will be also published'
+    skip_tests:
+      required: false
+      type: boolean
+      default: false
+      description: 'Skip tests during release build (not recommended)'

 jobs:
@@ -200,6 +205,7 @@

 #  test-release:
 #    name: test on ${{ matrix.id }}
+#    if: ${{ ! inputs.skip_tests }}
 #    runs-on: [ self-hosted, Release, "${{ matrix.runner-arch }}" ]
 #    timeout-minutes: 7200 # 5 days
 #    needs: [ build-release ]
@@ -283,6 +289,7 @@ jobs:
   build-debian-pkg:
     name: Debian packages
     needs: [ build-release ]
+    if: always() && contains(needs.build-release.result, 'success') && !contains(needs.test-release.result, 'failure')
     uses: erigontech/erigon/.github/workflows/reusable-release-build-debian-pkg.yml@main
     with:
       application: ${{ needs.build-release.outputs.application }}
@@ -291,9 +298,10 @@

   publish-docker-image:
     needs: [ build-release ]
+    if: always() && contains(needs.build-release.result, 'success') && !contains(needs.test-release.result, 'failure')
     runs-on: ubuntu-latest
     timeout-minutes: 30
-    name: Docker image
+    name: Docker image

     steps:
@@ -370,6 +378,7 @@ jobs:

   publish-release:
     needs: [ build-debian-pkg, publish-docker-image, build-release ]
+    if: always() && contains(needs.build-release.result, 'success') && contains(needs.build-debian-pkg.result, 'success') && contains(needs.publish-docker-image.result, 'success')
     runs-on: ubuntu-latest
     timeout-minutes: 15
     name: Publish release notes
@@ -427,8 +436,8 @@ jobs:

   In-case-of-failure:
     name: "In case of failure: remove remote git tag pointing to the new version."
-    needs: [ publish-release, build-release ]
-    if: always() && !contains(needs.build-release.result, 'success')
+    needs: [ publish-release, build-release, test-release ]
+    if: always() && !contains(needs.build-release.result, 'success') && contains(needs.test-release.result, 'failure') && !contains(needs.publish-release.result, 'success') && !contains(needs.build-debian-pkg.result, 'success') && !contains(needs.publish-docker-image.result, 'success')
     runs-on: ubuntu-22.04

     steps:
diff --git a/cl/cltypes/solid/validator_set.go b/cl/cltypes/solid/validator_set.go
index 72b5ccd13e5..e57af6bc677 100644
--- a/cl/cltypes/solid/validator_set.go
+++ b/cl/cltypes/solid/validator_set.go
@@ -91,16 +91,17 @@ func (v *ValidatorSet) expandBuffer(newValidatorSetLength int) {
 func (v *ValidatorSet) Append(val Validator) {
     offset := v.EncodingSizeSSZ()
     // we are overflowing the buffer? append.
-    if offset+validatorSize >= len(v.buffer) {
-        v.expandBuffer(v.l + 1)
-        v.phase0Data = append(v.phase0Data, Phase0Data{})
+    //if offset+validatorSize >= len(v.buffer) {
+    v.expandBuffer(v.l + 1)
+    v.phase0Data = append(v.phase0Data, Phase0Data{})
+    //}
-        if v.MerkleTree != nil {
-            v.MerkleTree.AppendLeaf()
-        }
-        v.zeroTreeHash(v.l)
-        copy(v.buffer[offset:], val)
+    copy(v.buffer[offset:], val)
+    if v.MerkleTree != nil {
+        v.MerkleTree.AppendLeaf()
     }
+    v.zeroTreeHash(v.l)
+
     if v.l >= len(v.phase0Data) {
         for i := len(v.phase0Data); i < v.l+1; i++ {
             v.phase0Data = append(v.phase0Data, Phase0Data{})
@@ -171,6 +172,7 @@ func (v *ValidatorSet) CopyTo(t *ValidatorSet) {
     t.phase0Data = make([]Phase0Data, v.l)
     copy(t.buffer, v.buffer)
     copy(t.attesterBits, v.attesterBits)
+    t.buffer = t.buffer[:v.l*validatorSize]
     t.attesterBits = t.attesterBits[:v.l]
 }
diff --git a/cl/merkle_tree/merkle_tree.go b/cl/merkle_tree/merkle_tree.go
index 73d638881f8..92de2e98f78 100644
--- a/cl/merkle_tree/merkle_tree.go
+++ b/cl/merkle_tree/merkle_tree.go
@@ -201,42 +201,46 @@ func (m *MerkleTree) CopyInto(other *MerkleTree) {
     m.mu.RLock()
     defer m.mu.RUnlock()
     defer other.mu.Unlock()
-    //other.computeLeaf = m.computeLeaf
-    if len(other.layers) > len(m.layers) {
-        // reset the internal layers
-        for i := len(m.layers); i < len(other.layers); i++ {
-            other.layers[i] = other.layers[i][:0]
-        }
-        other.layers = other.layers[:len(m.layers)]
+
+    // Copy primitive fields
+    other.computeLeaf = m.computeLeaf
+    other.leavesCount = m.leavesCount
+    if m.limit != nil {
+        other.limit = new(uint64) // Shallow copy
+        *other.limit = *m.limit
+    } else {
+        other.limit = nil
     }

-    if len(m.layers) > len(other.layers) {
-        for len(other.layers) != len(m.layers) {
-            idx := len(other.layers)
-            other.layers = append(other.layers, make([]byte, len(m.layers[idx]), (len(m.layers[idx])*3)/2))
-        }
+    // Ensure `other.layers` has enough capacity (with +50% buffer for future growth)
+    requiredLayersLen := len(m.layers)
+    if cap(other.layers) < requiredLayersLen {
+        other.layers = make([][]byte, requiredLayersLen, requiredLayersLen+(requiredLayersLen/2))
+    } else {
+        other.layers = other.layers[:requiredLayersLen]
     }

-    for i := 0; i < len(m.layers); i++ {
-        // If the destination buffer is too short, extend it
-        if len(m.layers[i]) > cap(other.layers[i]) {
-            other.layers[i] = make([]byte, len(m.layers[i]), (len(m.layers[i])*3)/2)
+    // Copy layers while reusing memory, and allocate with +50% extra space if needed
+    for i := range m.layers {
+        requiredLayerLen := len(m.layers[i])
+        if cap(other.layers[i]) < requiredLayerLen {
+            other.layers[i] = make([]byte, requiredLayerLen, requiredLayerLen+(requiredLayerLen/2))
+        } else {
+            other.layers[i] = other.layers[i][:requiredLayerLen]
         }
-        // Normalizr the destination length
-        other.layers[i] = other.layers[i][:len(m.layers[i])]
-
-        // Now that the 2 slices are of equal length we can do a simple memcopy
         copy(other.layers[i], m.layers[i])
     }

-    other.leavesCount = m.leavesCount
-    other.limit = m.limit
-    //other.dirtyLeaves = make([]atomic.Bool, len(m.dirtyLeaves))
+    // Ensure `other.dirtyLeaves` has enough capacity (with +50% buffer for future growth)
+    requiredLeavesLen := len(m.dirtyLeaves)
+    if cap(other.dirtyLeaves) < requiredLeavesLen {
+        other.dirtyLeaves = make([]atomic.Bool, requiredLeavesLen, requiredLeavesLen+(requiredLeavesLen/2))
+    } else {
+        other.dirtyLeaves = other.dirtyLeaves[:requiredLeavesLen]
+    }

-    for i := 0; i < len(m.dirtyLeaves); i++ {
-        if i >= len(other.dirtyLeaves) {
-            other.dirtyLeaves = append(other.dirtyLeaves, atomic.Bool{})
-        }
+    // Copy atomic dirty leaves state
+    for i := range m.dirtyLeaves {
         other.dirtyLeaves[i].Store(m.dirtyLeaves[i].Load())
     }
 }
diff --git a/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go b/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go
index f7aeff38765..14011fec259 100644
--- a/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go
+++ b/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go
@@ -379,7 +379,7 @@ func (f *forkGraphDisk) useCachedStateIfPossible(blockRoot libcommon.Hash, in *s
     }

     if prevHeadBlockRoot != blockRoot {
-        log.Warn("Not Using a cached beacon state", "blockRoot", blockRoot)
+        log.Debug("Not Using a cached beacon state", "blockRoot", blockRoot)
         return nil
     }
     ok = true
diff --git a/cl/phase1/network/beacon_downloader.go b/cl/phase1/network/beacon_downloader.go
index 5c9614bcc67..64a537eaeea 100644
--- a/cl/phase1/network/beacon_downloader.go
+++ b/cl/phase1/network/beacon_downloader.go
@@ -76,7 +76,7 @@ type peerAndBlocks struct {
 }

 func (f *ForwardBeaconDownloader) RequestMore(ctx context.Context) {
-    count := uint64(32)
+    count := uint64(16)
     var atomicResp atomic.Value
     atomicResp.Store(peerAndBlocks{})
     reqInterval := time.NewTicker(300 * time.Millisecond)
@@ -96,14 +96,17 @@ Loop:
         }
         // double the request count every 10 seconds. This is inspired by the mekong network, which has many consecutive missing blocks.
         reqCount := count
-        if !f.highestSlotUpdateTime.IsZero() {
-            multiplier := int(time.Since(f.highestSlotUpdateTime).Seconds()) / 10
-            multiplier = min(multiplier, 6)
-            reqCount *= uint64(1 << uint(multiplier))
-        }
+
+        // NEED TO COMMENT THIS BC IT CAUSES ISSUES ON MAINNET
+        // if !f.highestSlotUpdateTime.IsZero() {
+        //     multiplier := int(time.Since(f.highestSlotUpdateTime).Seconds()) / 10
+        //     multiplier = min(multiplier, 6)
+        //     reqCount *= uint64(1 << uint(multiplier))
+        // }
+
         // leave a warning if we are stuck for more than 90 seconds
         if time.Since(f.highestSlotUpdateTime) > 90*time.Second {
-            log.Debug("Forward beacon downloader gets stuck", "time", time.Since(f.highestSlotUpdateTime).Seconds(), "highestSlotProcessed", f.highestSlotProcessed)
+            log.Trace("Forward beacon downloader gets stuck", "time", time.Since(f.highestSlotUpdateTime).Seconds(), "highestSlotProcessed", f.highestSlotProcessed)
         }
         // this is so we do not get stuck on a side-fork
         responses, peerId, err := f.rpc.SendBeaconBlocksByRangeReq(ctx, reqSlot, reqCount)
diff --git a/cl/phase1/stages/forward_sync.go b/cl/phase1/stages/forward_sync.go
index 5a435f302c1..7bde4554c59 100644
--- a/cl/phase1/stages/forward_sync.go
+++ b/cl/phase1/stages/forward_sync.go
@@ -2,6 +2,7 @@ package stages

 import (
     "context"
+    "errors"
     "fmt"
     "sort"
     "sync/atomic"
@@ -16,6 +17,7 @@ import (
     "github.com/erigontech/erigon/cl/persistence/beacon_indicies"
     "github.com/erigontech/erigon/cl/persistence/blob_storage"
     "github.com/erigontech/erigon/cl/phase1/core/state"
+    "github.com/erigontech/erigon/cl/phase1/forkchoice"
     network2 "github.com/erigontech/erigon/cl/phase1/network"
 )
@@ -35,16 +37,18 @@ func shouldProcessBlobs(blocks []*cltypes.SignedBeaconBlock, cfg *Cfg) bool {
     }
     // Check if the requested blocks are too old to request blobs
     // https://github.com/ethereum/consensus-specs/blob/dev/specs/deneb/p2p-interface.md#the-reqresp-domain
-    highestEpoch := highestSlot / cfg.beaconCfg.SlotsPerEpoch
-    currentEpoch := cfg.ethClock.GetCurrentEpoch()
-    minEpochDist := uint64(0)
-    if currentEpoch > cfg.beaconCfg.MinEpochsForBlobSidecarsRequests {
-        minEpochDist = currentEpoch - cfg.beaconCfg.MinEpochsForBlobSidecarsRequests
-    }
-    finalizedEpoch := currentEpoch - 2
-    if highestEpoch < max(cfg.beaconCfg.DenebForkEpoch, minEpochDist, finalizedEpoch) {
-        return false
-    }
+
+    // this is bad
+    // highestEpoch := highestSlot / cfg.beaconCfg.SlotsPerEpoch
+    // currentEpoch := cfg.ethClock.GetCurrentEpoch()
+    // minEpochDist := uint64(0)
+    // if currentEpoch > cfg.beaconCfg.MinEpochsForBlobSidecarsRequests {
+    //     minEpochDist = currentEpoch - cfg.beaconCfg.MinEpochsForBlobSidecarsRequests
+    // }
+    // finalizedEpoch := currentEpoch - 2
+    // if highestEpoch < max(cfg.beaconCfg.DenebForkEpoch, minEpochDist, finalizedEpoch) {
+    //     return false
+    // }
     return blobsExist
 }
@@ -65,6 +69,7 @@ func downloadAndProcessEip4844DA(ctx context.Context, logger log.Logger, cfg *Cf
         err = fmt.Errorf("failed to get blob identifiers: %w", err)
         return
     }
+
     // If there are no blobs to retrieve, return the highest slot processed
     if ids.Len() == 0 {
         return highestSlotProcessed, nil
@@ -96,20 +101,57 @@ func downloadAndProcessEip4844DA(ctx context.Context, logger log.Logger, cfg *Cf
     return highestProcessed - 1, err
 }

+func filterUnneededBlocks(ctx context.Context, blocks []*cltypes.SignedBeaconBlock, cfg *Cfg) []*cltypes.SignedBeaconBlock {
+    filtered := make([]*cltypes.SignedBeaconBlock, 0, len(blocks))
+    // Find the latest block in the list
+    for _, block := range blocks {
+        blockRoot, err := block.Block.HashSSZ()
+        if err != nil {
+            panic(err)
+        }
+        _, hasInFcu := cfg.forkChoice.GetHeader(blockRoot)
+
+        var hasSignedHeaderInDB bool
+        if err = cfg.indiciesDB.View(ctx, func(tx kv.Tx) error {
+            _, hasSignedHeaderInDB, err = beacon_indicies.ReadSignedHeaderByBlockRoot(ctx, tx, blockRoot)
+            return err
+        }); err != nil {
+            panic(err)
+        }
+        if !hasInFcu || !hasSignedHeaderInDB {
+            filtered = append(filtered, block)
+        }
+    }
+    return filtered
+}
+
 // processDownloadedBlockBatches processes a batch of downloaded blocks.
 // It takes the highest block processed, a flag to determine if insertion is needed, and a list of signed beacon blocks as input.
 // It returns the new highest block processed and an error if any.
-func processDownloadedBlockBatches(ctx context.Context, cfg *Cfg, highestBlockProcessed uint64, shouldInsert bool, blocks []*cltypes.SignedBeaconBlock) (newHighestBlockProcessed uint64, err error) {
+func processDownloadedBlockBatches(ctx context.Context, logger log.Logger, cfg *Cfg, highestBlockProcessed uint64, shouldInsert bool, blocks []*cltypes.SignedBeaconBlock) (newHighestBlockProcessed uint64, err error) {
     // Pre-process the block batch to ensure that the blocks are sorted by slot in ascending order
     sort.Slice(blocks, func(i, j int) bool {
         return blocks[i].Block.Slot < blocks[j].Block.Slot
     })

+    // Filter out blocks that are already in the FCU or have a signed header in the DB
+    blocks = filterUnneededBlocks(ctx, blocks, cfg)
+    if len(blocks) == 0 {
+        return highestBlockProcessed, nil
+    }
+
     var (
         blockRoot common.Hash
         st        *state.CachingBeaconState
     )
     newHighestBlockProcessed = highestBlockProcessed
+    if shouldProcessBlobs(blocks, cfg) {
+        _, err = downloadAndProcessEip4844DA(ctx, logger, cfg, highestBlockProcessed, blocks)
+        if err != nil {
+            logger.Trace("[Caplin] Failed to process blobs", "err", err)
+            return highestBlockProcessed, nil
+        }
+    }
     // Iterate over each block in the sorted list
     for _, block := range blocks {
         // Compute the hash of the current block
@@ -131,7 +173,16 @@ func processDownloadedBlockBatches(ctx context.Context, cfg *Cfg, highestBlockPr
         }

         // Process the block
-        if err = processBlock(ctx, cfg, cfg.indiciesDB, block, false, true, false); err != nil {
+        if err = processBlock(ctx, cfg, cfg.indiciesDB, block, false, true, true); err != nil {
+            fmt.Println("EIP-4844 data not available", err, block.Block.Slot)
+            if errors.Is(err, forkchoice.ErrEIP4844DataNotAvailable) {
+                // Return an error if EIP-4844 data is not available
+                logger.Trace("[Caplin] forward sync EIP-4844 data not available", "blockSlot", block.Block.Slot)
+                if newHighestBlockProcessed == 0 {
+                    return 0, nil
+                }
+                return newHighestBlockProcessed - 1, nil
+            }
             // Return an error if block processing fails
             err = fmt.Errorf("bad blocks segment received: %w", err)
             return
@@ -193,27 +244,13 @@ func forwardSync(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) er

     // Set the function to process downloaded blocks
     downloader.SetProcessFunction(func(initialHighestSlotProcessed uint64, blocks []*cltypes.SignedBeaconBlock) (newHighestSlotProcessed uint64, err error) {
-        highestSlotProcessed, err := processDownloadedBlockBatches(ctx, cfg, initialHighestSlotProcessed, shouldInsert, blocks)
+        highestSlotProcessed, err := processDownloadedBlockBatches(ctx, logger, cfg, initialHighestSlotProcessed, shouldInsert, blocks)
         if err != nil {
             logger.Warn("[Caplin] Failed to process block batch", "err", err)
             return initialHighestSlotProcessed, err
         }
-        // Exit if we are pre-EIP-4844
-        if !shouldProcessBlobs(blocks, cfg) {
-            currentSlot.Store(highestSlotProcessed)
-            return highestSlotProcessed, nil
-        }
-        // Process blobs for EIP-4844
-        highestBlobSlotProcessed, err := downloadAndProcessEip4844DA(ctx, logger, cfg, initialHighestSlotProcessed, blocks)
-        if err != nil {
-            logger.Warn("[Caplin] Failed to process blobs", "err", err)
-            return initialHighestSlotProcessed, err
-        }
-        if highestBlobSlotProcessed <= initialHighestSlotProcessed {
-            return initialHighestSlotProcessed, nil
-        }
-        currentSlot.Store(highestBlobSlotProcessed)
-        return highestBlobSlotProcessed, nil
+        currentSlot.Store(highestSlotProcessed)
+        return highestSlotProcessed, nil
     })

     // Get the current slot of the chain tip
diff --git a/ethstats/ethstats.go b/ethstats/ethstats.go
index eb5ebb51ab4..706939faf42 100644
--- a/ethstats/ethstats.go
+++ b/ethstats/ethstats.go
@@ -595,9 +595,9 @@ func (s *Service) reportHistory(conn *connWrapper, list []uint64) error {
     } else {
         // No indexes requested, send back the top ones
         headHash := rawdb.ReadHeadBlockHash(roTx)
-        headNumber, _ := s.blockReader.HeaderNumber(context.Background(), roTx, headHash)
-        if headNumber == nil {
-            return nil
+        headNumber, err := s.blockReader.HeaderNumber(context.Background(), roTx, headHash)
+        if headNumber == nil || err != nil {
+            return err
         }
         start := int(*headNumber - historyUpdateRange + 1)
         if start < 0 {
@@ -615,12 +615,12 @@ func (s *Service) reportHistory(conn *connWrapper, list []uint64) error {
         if err != nil {
             return err
         }
-        td, err := rawdb.ReadTd(roTx, block.Hash(), number)
-        if err != nil {
-            return err
-        }
         // If we do have the block, add to the history and continue
         if block != nil {
+            td, err := rawdb.ReadTd(roTx, block.Hash(), number)
+            if err != nil {
+                return err
+            }
             history[len(history)-1-i] = s.assembleBlockStats(block, td)
             continue
         }
diff --git a/go.mod b/go.mod
index 671bcdcd93f..8411cefddae 100644
--- a/go.mod
+++ b/go.mod
@@ -290,7 +290,7 @@ require (
     github.com/spaolacci/murmur3 v1.1.0 // indirect
     github.com/spf13/cast v1.5.0 // indirect
     github.com/stoewer/go-strcase v1.2.0 // indirect
-    github.com/supranational/blst v0.3.13
+    github.com/supranational/blst v0.3.14
     github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
     go.etcd.io/bbolt v1.3.9 // indirect
     go.opentelemetry.io/otel v1.31.0 // indirect
diff --git a/go.sum b/go.sum
index 59f98c466a2..ca58c19173b 100644
--- a/go.sum
+++ b/go.sum
@@ -908,8 +908,8 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl
 github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
 github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
-github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
+github.com/supranational/blst v0.3.14 h1:xNMoHRJOTwMn63ip6qoWJ2Ymgvj7E2b9jY2FAwY+qRo=
+github.com/supranational/blst v0.3.14/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
 github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
 github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E=
 github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME=
diff --git a/turbo/engineapi/engine_helpers/constants.go b/turbo/engineapi/engine_helpers/constants.go
index 29b2c882335..b240d9e90c0 100644
--- a/turbo/engineapi/engine_helpers/constants.go
+++ b/turbo/engineapi/engine_helpers/constants.go
@@ -24,3 +24,15 @@ var UnknownPayloadErr = rpc.CustomError{Code: -38001, Message: "Unknown payload"
 var InvalidForkchoiceStateErr = rpc.CustomError{Code: -38002, Message: "Invalid forkchoice state"}
 var InvalidPayloadAttributesErr = rpc.CustomError{Code: -38003, Message: "Invalid payload attributes"}
 var TooLargeRequestErr = rpc.CustomError{Code: -38004, Message: "Too large request"}
+
+const PectraBanner = `
+'########::'########::'######::'########:'########:::::'###::::
+ ##.... ##: ##.....::'##... ##:... ##..:: ##.... ##:::'## ##:::
+ ##:::: ##: ##::::::: ##:::..::::: ##:::: ##:::: ##::'##:. ##::
+ ########:: ######::: ##:::::::::: ##:::: ########::'##:::. ##:
+ ##.....::: ##...:::: ##:::::::::: ##:::: ##.. ##::: #########:
+ ##:::::::: ##::::::: ##::: ##:::: ##:::: ##::. ##:: ##.... ##:
+ ##:::::::: ########:. ######::::: ##:::: ##:::. ##: ##:::: ##:
+..:::::::::........:::......::::::..:::::..:::::..::..:::::..::
+====================== PECTRA ACTIVATED ======================
+`
diff --git a/turbo/engineapi/engine_server.go b/turbo/engineapi/engine_server.go
index 131e988dc7c..60bf2cc1756 100644
--- a/turbo/engineapi/engine_server.go
+++ b/turbo/engineapi/engine_server.go
@@ -75,6 +75,8 @@ type EngineServer struct {
     logger          log.Logger

     engineLogSpamer *engine_logs_spammer.EngineLogsSpammer
+    // TODO Remove this on next release
+    printPectraBanner bool
 }

 const fcuTimeout = 1000 // according to mathematics: 1000 millisecods = 1 second
@@ -84,15 +86,16 @@ func NewEngineServer(logger log.Logger, config *chain.Config, executionService e
     blockDownloader *engine_block_downloader.EngineBlockDownloader, caplin, test, proposing, consuming bool) *EngineServer {
     chainRW := eth1_chain_reader.NewChainReaderEth1(config, executionService, fcuTimeout)
     srv := &EngineServer{
-        logger:           logger,
-        config:           config,
-        executionService: executionService,
-        blockDownloader:  blockDownloader,
-        chainRW:          chainRW,
-        proposing:        proposing,
-        hd:               hd,
-        caplin:           caplin,
-        engineLogSpamer:  engine_logs_spammer.NewEngineLogsSpammer(logger, config),
+        logger:            logger,
+        config:            config,
+        executionService:  executionService,
+        blockDownloader:   blockDownloader,
+        chainRW:           chainRW,
+        proposing:         proposing,
+        hd:                hd,
+        caplin:            caplin,
+        engineLogSpamer:   engine_logs_spammer.NewEngineLogsSpammer(logger, config),
+        printPectraBanner: true,
     }

     srv.consuming.Store(consuming)
@@ -339,6 +342,11 @@ func (s *EngineServer) newPayload(ctx context.Context, req *engine_types.Executi
         return nil, payloadStatus.CriticalError
     }

+    if version == clparams.ElectraVersion && s.printPectraBanner && payloadStatus.Status == engine_types.ValidStatus {
+        s.printPectraBanner = false
+        log.Info(engine_helpers.PectraBanner)
+    }
+
     return payloadStatus, nil
 }
@@ -430,14 +438,14 @@ func (s *EngineServer) getQuickPayloadStatusIfPossible(ctx context.Context, bloc
         if header != nil && isCanonical {
             return &engine_types.PayloadStatus{Status: engine_types.ValidStatus, LatestValidHash: &blockHash}, nil
         }
-        if shouldWait, _ := waitForStuff(func() (bool, error) {
+        if shouldWait, _ := waitForStuff(50*time.Millisecond, func() (bool, error) {
             return parent == nil && s.hd.PosStatus() == headerdownload.Syncing, nil
         }); shouldWait {
             s.logger.Info(fmt.Sprintf("[%s] Downloading some other PoS blocks", prefix), "hash", blockHash)
             return &engine_types.PayloadStatus{Status: engine_types.SyncingStatus}, nil
         }
     } else {
-        if shouldWait, _ := waitForStuff(func() (bool, error) {
+        if shouldWait, _ := waitForStuff(50*time.Millisecond, func() (bool, error) {
             return header == nil && s.hd.PosStatus() == headerdownload.Syncing, nil
         }); shouldWait {
             s.logger.Info(fmt.Sprintf("[%s] Downloading some other PoS stuff", prefix), "hash", blockHash)
@@ -451,7 +459,7 @@ func (s *EngineServer) getQuickPayloadStatusIfPossible(ctx context.Context, bloc
             return &engine_types.PayloadStatus{Status: engine_types.ValidStatus, LatestValidHash: &blockHash}, nil
         }
     }
-    waitingForExecutionReady, err := waitForStuff(func() (bool, error) {
+    waitingForExecutionReady, err := waitForStuff(500*time.Millisecond, func() (bool, error) {
         isReady, err := s.chainRW.Ready(ctx)
         return !isReady, err
     })
@@ -617,7 +625,7 @@ func (s *EngineServer) forkchoiceUpdated(ctx context.Context, forkchoiceState *e
     var resp *execution.AssembleBlockResponse

-    execBusy, err := waitForStuff(func() (bool, error) {
+    execBusy, err := waitForStuff(500*time.Millisecond, func() (bool, error) {
         resp, err = s.executionService.AssembleBlock(ctx, req)
         if err != nil {
             return false, err
@@ -747,15 +755,9 @@ func (e *EngineServer) HandleNewPayload(
     if currentHeadNumber != nil {
         // We try waiting until we finish downloading the PoS blocks if the distance from the head is enough,
         // so that we will perform full validation.
-        success := false
-        for i := 0; i < 100; i++ {
-            time.Sleep(10 * time.Millisecond)
-            if e.blockDownloader.Status() == headerdownload.Synced {
-                success = true
-                break
-            }
-        }
-        if !success {
+        if stillSyncing, _ := waitForStuff(500*time.Millisecond, func() (bool, error) {
+            return e.blockDownloader.Status() != headerdownload.Synced, nil
+        }); stillSyncing {
             return &engine_types.PayloadStatus{Status: engine_types.SyncingStatus}, nil
         }
         status, _, latestValidHash, err := e.chainRW.ValidateChain(ctx, headerHash, headerNumber)
@@ -885,15 +887,15 @@ func (e *EngineServer) SetConsuming(consuming bool) {
     e.consuming.Store(consuming)
 }

-func waitForStuff(waitCondnF func() (bool, error)) (bool, error) {
+func waitForStuff(maxWait time.Duration, waitCondnF func() (bool, error)) (bool, error) {
     shouldWait, err := waitCondnF()
     if err != nil || !shouldWait {
         return false, err
     }
-    // Times out after 8s - loosely based on timeouts of FCU and NewPayload for Ethereum specs
-    // Look for "timeout" in, for instance, https://github.com/ethereum/execution-apis/blob/main/src/engine/cancun.md
-    for i := 0; i < 800; i++ {
-        time.Sleep(10 * time.Millisecond)
+    checkInterval := 10 * time.Millisecond
+    maxChecks := int64(maxWait) / int64(checkInterval)
+    for i := int64(0); i < maxChecks; i++ {
+        time.Sleep(checkInterval)
         shouldWait, err = waitCondnF()
         if err != nil || !shouldWait {
             return shouldWait, err
diff --git a/turbo/jsonrpc/eth_block.go b/turbo/jsonrpc/eth_block.go
index dbc9d924142..ba1cffe828c 100644
--- a/turbo/jsonrpc/eth_block.go
+++ b/turbo/jsonrpc/eth_block.go
@@ -35,7 +35,6 @@ import (
     "github.com/erigontech/erigon/core/state"
     "github.com/erigontech/erigon/core/types"
     "github.com/erigontech/erigon/core/vm"
-    "github.com/erigontech/erigon/polygon/bor/borcfg"
     bortypes "github.com/erigontech/erigon/polygon/bor/types"
     "github.com/erigontech/erigon/rpc"
     "github.com/erigontech/erigon/turbo/adapter/ethapi"
@@ -261,11 +260,6 @@ func (api *APIImpl) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber
         }
     }

-    if chainConfig.Bor != nil {
-        borConfig := chainConfig.Bor.(*borcfg.BorConfig)
-        response["miner"], _ = ecrecover(b.Header(), borConfig)
-    }
-
     return response, err
 }

@@ -332,11 +326,6 @@ func (api *APIImpl) GetBlockByHash(ctx context.Context, numberOrHash rpc.BlockNu
         }
     }

-    if chainConfig.Bor != nil {
-        borConfig := chainConfig.Bor.(*borcfg.BorConfig)
-        response["miner"], _ = ecrecover(block.Header(), borConfig)
-    }
-
     return response, err
 }
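Note for reviewers (not part of the patch): a minimal, standalone sketch of the bounded-wait pattern that the turbo/engineapi/engine_server.go hunks above introduce in place of the hand-rolled sleep loops. The helper body mirrors the hunks shown; the trailing return after the loop, the main() harness, and its deadline condition are illustrative assumptions added only so the sketch compiles and runs.

package main

import (
	"fmt"
	"time"
)

// waitForStuff polls waitCondnF every 10ms until the condition reports there is
// nothing left to wait for, an error occurs, or maxWait elapses. It returns the
// last observed "should we still wait" value and error.
func waitForStuff(maxWait time.Duration, waitCondnF func() (bool, error)) (bool, error) {
	shouldWait, err := waitCondnF()
	if err != nil || !shouldWait {
		return false, err
	}
	checkInterval := 10 * time.Millisecond
	maxChecks := int64(maxWait) / int64(checkInterval)
	for i := int64(0); i < maxChecks; i++ {
		time.Sleep(checkInterval)
		shouldWait, err = waitCondnF()
		if err != nil || !shouldWait {
			return shouldWait, err
		}
	}
	// Assumed final return so the sketch compiles; the hunk ends before this point.
	return shouldWait, err
}

func main() {
	// Hypothetical condition: keep waiting until a 120ms deadline passes,
	// bounded by a 500ms maximum wait, similar to the caps used in the patch.
	deadline := time.Now().Add(120 * time.Millisecond)
	stillWaiting, err := waitForStuff(500*time.Millisecond, func() (bool, error) {
		return time.Now().Before(deadline), nil
	})
	fmt.Println("still waiting:", stillWaiting, "err:", err)
}

With this shape, each call site passes its own cap (50ms for the quick header checks, 500ms for execution readiness, block assembly, and block-downloader status) instead of relying on the single hard-coded 8s ceiling the old helper used, so a stalled condition can surface quickly (for example as SyncingStatus in HandleNewPayload) rather than blocking the Engine API call.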