Upstream beta2 #616

Merged · 14 commits · Feb 19, 2025
Changes from all commits
3 changes: 3 additions & 0 deletions .github/workflows/qa-clean-exit-block-downloading.yml
@@ -1,6 +1,9 @@
name: QA - Clean exit (block downloading)

on:
# push:
# branches:
# - 'release/3.*'
# schedule:
# - cron: '0 8 * * 1-6' # Run every day at 08:00 AM UTC except Sunday
workflow_dispatch: # Run manually
2 changes: 2 additions & 0 deletions .github/workflows/qa-rpc-integration-tests.yml
@@ -5,9 +5,11 @@ on:
# push:
# branches:
# - main
# - 'release/3.*'
# pull_request:
# branches:
# - main
# - 'release/3.*'
# types:
# - opened
# - reopened
3 changes: 3 additions & 0 deletions .github/workflows/qa-rpc-performance-tests.yml
@@ -1,6 +1,9 @@
name: QA - RPC Performance Tests

on:
push:
branches:
- 'release/3.*'
workflow_dispatch:
schedule:
- cron: '0 0 * * 0' # Run on Sunday at 00:00 AM UTC
3 changes: 3 additions & 0 deletions .github/workflows/qa-snap-download.yml
@@ -1,6 +1,9 @@
name: QA - Snapshot Download

on:
# push:
# branches:
# - 'release/3.*'
# schedule:
# - cron: '0 20 * * 1-6' # Run every night at 20:00 (08:00 PM) UTC except Sunday
workflow_dispatch: # Run manually
3 changes: 3 additions & 0 deletions .github/workflows/qa-sync-from-scratch-minimal-node.yml
@@ -1,6 +1,9 @@
name: QA - Sync from scratch (minimal node)

on:
# push:
# branches:
# - 'release/3.*'
# schedule:
# - cron: '0 0 * * *' # Run every night at 00:00 AM UTC
workflow_dispatch: # Run manually
3 changes: 3 additions & 0 deletions .github/workflows/qa-tip-tracking-gnosis.yml
@@ -1,6 +1,9 @@
name: QA - Tip tracking (Gnosis)

on:
# push:
# branches:
# - 'release/3.*'
# schedule:
# - cron: '0 0 * * 1-6' # Run every night at 00:00 AM UTC except Sunday
workflow_dispatch: # Run manually
3 changes: 3 additions & 0 deletions .github/workflows/qa-tip-tracking-polygon.yml
@@ -1,6 +1,9 @@
name: QA - Tip tracking (Polygon)

on:
# push:
# branches:
# - 'release/3.*'
# schedule:
# - cron: '0 0 * * 1-6' # Run every night at 00:00 AM UTC except Sunday
workflow_dispatch: # Run manually
3 changes: 3 additions & 0 deletions .github/workflows/qa-tip-tracking.yml
@@ -1,6 +1,9 @@
name: QA - Tip tracking

on:
# push:
# branches:
# - 'release/3.*'
# schedule:
# - cron: '0 20 * * 1-6' # Run every night at 08:00 PM UTC except Sunday
workflow_dispatch: # Run manually
17 changes: 13 additions & 4 deletions .github/workflows/release.yml
@@ -1,5 +1,5 @@
name: Release
run-name: Build release ${{ inputs.release_version}} from branch ${{ inputs.checkout_ref }} by @${{ github.actor }}
run-name: Build release ${{ inputs.release_version}} from branch ${{ inputs.checkout_ref }}, Skip tests=${{ inputs.skip_tests }}

env:
APPLICATION: "bsc-erigon"
@@ -37,6 +37,11 @@ on:
type: boolean
default: false
description: 'publish_latest_tag: when set then docker image with tag :latest will be also published'
skip_tests:
required: false
type: boolean
default: false
description: 'Skip tests during release build (not recommended)'

jobs:

@@ -200,6 +205,7 @@ jobs:

# test-release:
# name: test on ${{ matrix.id }}
# if: ${{ ! inputs.skip_tests }}
# runs-on: [ self-hosted, Release, "${{ matrix.runner-arch }}" ]
# timeout-minutes: 7200 # 5 days
# needs: [ build-release ]
@@ -283,6 +289,7 @@ jobs:
build-debian-pkg:
name: Debian packages
needs: [ build-release ]
if: always() && contains(needs.build-release.result, 'success') && !contains(needs.test-release.result, 'failure')
uses: erigontech/erigon/.github/workflows/reusable-release-build-debian-pkg.yml@main
with:
application: ${{ needs.build-release.outputs.application }}
@@ -291,9 +298,10 @@

publish-docker-image:
needs: [ build-release ]
if: always() && contains(needs.build-release.result, 'success') && !contains(needs.test-release.result, 'failure')
runs-on: ubuntu-latest
timeout-minutes: 30
name: Docker image
name: Docker image

steps:

@@ -370,6 +378,7 @@ jobs:

publish-release:
needs: [ build-debian-pkg, publish-docker-image, build-release ]
if: always() && contains(needs.build-release.result, 'success') && contains(needs.build-debian-pkg.result, 'success') && contains(needs.publish-docker-image.result, 'success')
runs-on: ubuntu-latest
timeout-minutes: 15
name: Publish release notes
@@ -427,8 +436,8 @@ jobs:

In-case-of-failure:
name: "In case of failure: remove remote git tag pointing to the new version."
needs: [ publish-release, build-release ]
if: always() && !contains(needs.build-release.result, 'success')
needs: [ publish-release, build-release, test-release ]
if: always() && !contains(needs.build-release.result, 'success') && contains(needs.test-release.result, 'failure') && !contains(needs.publish-release.result, 'success') && !contains(needs.build-debian-pkg.result, 'success') && !contains(needs.publish-docker-image.result, 'success')
runs-on: ubuntu-22.04

steps:
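The new skip_tests input and the if: conditions above let a release run even when the test job is skipped: downstream jobs fire as long as build-release succeeded and test-release did not fail. A loose Go model of that gating decision (not the Actions expression engine; contains(...) is modeled here as plain string comparison, and job results are assumed to be the usual success/failure/skipped/cancelled strings):

package main

import "fmt"

// shouldPublish loosely models the `if:` expressions added to build-debian-pkg and
// publish-docker-image: run whenever the build succeeded and the (possibly skipped)
// test job did not fail.
func shouldPublish(buildResult, testResult string) bool {
    return buildResult == "success" && testResult != "failure"
}

func main() {
    fmt.Println(shouldPublish("success", "skipped")) // true: tests skipped via skip_tests
    fmt.Println(shouldPublish("success", "success")) // true
    fmt.Println(shouldPublish("success", "failure")) // false: a failed test blocks publishing
    fmt.Println(shouldPublish("failure", "skipped")) // false: the build must succeed
}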
18 changes: 10 additions & 8 deletions cl/cltypes/solid/validator_set.go
@@ -91,16 +91,17 @@ func (v *ValidatorSet) expandBuffer(newValidatorSetLength int) {
func (v *ValidatorSet) Append(val Validator) {
offset := v.EncodingSizeSSZ()
// we are overflowing the buffer? append.
if offset+validatorSize >= len(v.buffer) {
v.expandBuffer(v.l + 1)
v.phase0Data = append(v.phase0Data, Phase0Data{})
//if offset+validatorSize >= len(v.buffer) {
v.expandBuffer(v.l + 1)
v.phase0Data = append(v.phase0Data, Phase0Data{})
//}

if v.MerkleTree != nil {
v.MerkleTree.AppendLeaf()
}
v.zeroTreeHash(v.l)
copy(v.buffer[offset:], val)
copy(v.buffer[offset:], val)
if v.MerkleTree != nil {
v.MerkleTree.AppendLeaf()
}
v.zeroTreeHash(v.l)

if v.l >= len(v.phase0Data) {
for i := len(v.phase0Data); i < v.l+1; i++ {
v.phase0Data = append(v.phase0Data, Phase0Data{})
@@ -171,6 +172,7 @@ func (v *ValidatorSet) CopyTo(t *ValidatorSet) {
t.phase0Data = make([]Phase0Data, v.l)
copy(t.buffer, v.buffer)
copy(t.attesterBits, v.attesterBits)
t.buffer = t.buffer[:v.l*validatorSize]
t.attesterBits = t.attesterBits[:v.l]
}

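The rewritten Append above grows the buffer unconditionally and copies the validator bytes into place before the Merkle tree gains its new leaf, so the leaf is always derived from the freshly written data. A minimal self-contained sketch of that ordering, with an assumed fixed record size and a callback standing in for MerkleTree.AppendLeaf and zeroTreeHash:

package main

import "fmt"

const recordSize = 121 // assumed fixed encoded-validator size; illustrative only

// appendRecord grows the backing buffer, copies the new record in, and only then
// notifies the auxiliary index (a stand-in for the Merkle tree) about the new leaf,
// so the leaf is computed over the bytes that were just written.
func appendRecord(buf []byte, count int, rec []byte, onNewLeaf func(i int)) ([]byte, int) {
    offset := count * recordSize
    // Always make room for one more record (mirrors the now-unconditional expandBuffer call).
    if offset+recordSize > len(buf) {
        grown := make([]byte, offset+recordSize, (offset+recordSize)*3/2)
        copy(grown, buf)
        buf = grown
    }
    copy(buf[offset:], rec) // write the data first...
    onNewLeaf(count)        // ...then update the tree for leaf `count`
    return buf, count + 1
}

func main() {
    var buf []byte
    n := 0
    rec := make([]byte, recordSize)
    buf, n = appendRecord(buf, n, rec, func(i int) { fmt.Println("new leaf", i) })
    fmt.Println("records:", n, "buffer bytes:", len(buf))
}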
58 changes: 31 additions & 27 deletions cl/merkle_tree/merkle_tree.go
@@ -201,42 +201,46 @@ func (m *MerkleTree) CopyInto(other *MerkleTree) {
m.mu.RLock()
defer m.mu.RUnlock()
defer other.mu.Unlock()
//other.computeLeaf = m.computeLeaf
if len(other.layers) > len(m.layers) {
// reset the internal layers
for i := len(m.layers); i < len(other.layers); i++ {
other.layers[i] = other.layers[i][:0]
}
other.layers = other.layers[:len(m.layers)]

// Copy primitive fields
other.computeLeaf = m.computeLeaf
other.leavesCount = m.leavesCount
if m.limit != nil {
other.limit = new(uint64) // Shallow copy
*other.limit = *m.limit
} else {
other.limit = nil
}

if len(m.layers) > len(other.layers) {
for len(other.layers) != len(m.layers) {
idx := len(other.layers)
other.layers = append(other.layers, make([]byte, len(m.layers[idx]), (len(m.layers[idx])*3)/2))
}
// Ensure `other.layers` has enough capacity (with +50% buffer for future growth)
requiredLayersLen := len(m.layers)
if cap(other.layers) < requiredLayersLen {
other.layers = make([][]byte, requiredLayersLen, requiredLayersLen+(requiredLayersLen/2))
} else {
other.layers = other.layers[:requiredLayersLen]
}

for i := 0; i < len(m.layers); i++ {
// If the destination buffer is too short, extend it
if len(m.layers[i]) > cap(other.layers[i]) {
other.layers[i] = make([]byte, len(m.layers[i]), (len(m.layers[i])*3)/2)
// Copy layers while reusing memory, and allocate with +50% extra space if needed
for i := range m.layers {
requiredLayerLen := len(m.layers[i])
if cap(other.layers[i]) < requiredLayerLen {
other.layers[i] = make([]byte, requiredLayerLen, requiredLayerLen+(requiredLayerLen/2))
} else {
other.layers[i] = other.layers[i][:requiredLayerLen]
}
// Normalizr the destination length
other.layers[i] = other.layers[i][:len(m.layers[i])]

// Now that the 2 slices are of equal length we can do a simple memcopy
copy(other.layers[i], m.layers[i])
}

other.leavesCount = m.leavesCount
other.limit = m.limit
//other.dirtyLeaves = make([]atomic.Bool, len(m.dirtyLeaves))
// Ensure `other.dirtyLeaves` has enough capacity (with +50% buffer for future growth)
requiredLeavesLen := len(m.dirtyLeaves)
if cap(other.dirtyLeaves) < requiredLeavesLen {
other.dirtyLeaves = make([]atomic.Bool, requiredLeavesLen, requiredLeavesLen+(requiredLeavesLen/2))
} else {
other.dirtyLeaves = other.dirtyLeaves[:requiredLeavesLen]
}

for i := 0; i < len(m.dirtyLeaves); i++ {
if i >= len(other.dirtyLeaves) {
other.dirtyLeaves = append(other.dirtyLeaves, atomic.Bool{})
}
// Copy atomic dirty leaves state
for i := range m.dirtyLeaves {
other.dirtyLeaves[i].Store(m.dirtyLeaves[i].Load())
}
}
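The reworked CopyInto copies every field explicitly, reuses the destination's existing capacity where it can, and allocates with roughly 50% headroom when it cannot; the limit pointer is copied by value rather than aliased. A small sketch of those two patterns in isolation (copyLayer and copyLimit are hypothetical helpers, not part of the package):

package main

import "fmt"

// copyLayer copies src into dst, reusing dst's capacity when possible and otherwise
// allocating with about 50% headroom, the same reuse pattern as the rewritten CopyInto.
func copyLayer(dst, src []byte) []byte {
    if cap(dst) < len(src) {
        dst = make([]byte, len(src), len(src)+len(src)/2)
    } else {
        dst = dst[:len(src)]
    }
    copy(dst, src)
    return dst
}

// copyLimit deep-copies an optional *uint64 so the destination never aliases the
// source pointer (the limit handling added in the diff).
func copyLimit(src *uint64) *uint64 {
    if src == nil {
        return nil
    }
    out := new(uint64)
    *out = *src
    return out
}

func main() {
    src := []byte{1, 2, 3, 4}
    dst := make([]byte, 0, 2)
    dst = copyLayer(dst, src)
    fmt.Println(dst, cap(dst)) // [1 2 3 4] 6

    lim := uint64(64)
    fmt.Println(*copyLimit(&lim)) // 64
}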
2 changes: 1 addition & 1 deletion cl/phase1/forkchoice/fork_graph/fork_graph_disk.go
@@ -379,7 +379,7 @@ func (f *forkGraphDisk) useCachedStateIfPossible(blockRoot libcommon.Hash, in *s
}

if prevHeadBlockRoot != blockRoot {
log.Warn("Not Using a cached beacon state", "blockRoot", blockRoot)
log.Debug("Not Using a cached beacon state", "blockRoot", blockRoot)
return nil
}
ok = true
17 changes: 10 additions & 7 deletions cl/phase1/network/beacon_downloader.go
@@ -76,7 +76,7 @@ type peerAndBlocks struct {
}

func (f *ForwardBeaconDownloader) RequestMore(ctx context.Context) {
count := uint64(32)
count := uint64(16)
var atomicResp atomic.Value
atomicResp.Store(peerAndBlocks{})
reqInterval := time.NewTicker(300 * time.Millisecond)
@@ -96,14 +96,17 @@ Loop:
}
// double the request count every 10 seconds. This is inspired by the mekong network, which has many consecutive missing blocks.
reqCount := count
if !f.highestSlotUpdateTime.IsZero() {
multiplier := int(time.Since(f.highestSlotUpdateTime).Seconds()) / 10
multiplier = min(multiplier, 6)
reqCount *= uint64(1 << uint(multiplier))
}
// NEED TO COMMENT THIS BC IT CAUSES ISSUES ON MAINNET

// if !f.highestSlotUpdateTime.IsZero() {
// multiplier := int(time.Since(f.highestSlotUpdateTime).Seconds()) / 10
// multiplier = min(multiplier, 6)
// reqCount *= uint64(1 << uint(multiplier))
// }

// leave a warning if we are stuck for more than 90 seconds
if time.Since(f.highestSlotUpdateTime) > 90*time.Second {
log.Debug("Forward beacon downloader gets stuck", "time", time.Since(f.highestSlotUpdateTime).Seconds(), "highestSlotProcessed", f.highestSlotProcessed)
log.Trace("Forward beacon downloader gets stuck", "time", time.Since(f.highestSlotUpdateTime).Seconds(), "highestSlotProcessed", f.highestSlotProcessed)
}
// this is so we do not get stuck on a side-fork
responses, peerId, err := f.rpc.SendBeaconBlocksByRangeReq(ctx, reqSlot, reqCount)
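For reference, the commented-out block implemented the growth rule described in the comment above it: double the request count for every 10 seconds spent without a highest-slot update, capped at a 64x multiplier. A self-contained sketch of that rule as it read before being disabled (requestCount is a hypothetical helper, not part of the package):

package main

import (
    "fmt"
    "time"
)

// requestCount reproduces the disabled growth rule from the removed lines: double the
// request size for every 10 seconds without a highest-slot update, capped at 2^6.
func requestCount(base uint64, lastUpdate time.Time) uint64 {
    if lastUpdate.IsZero() {
        return base
    }
    multiplier := int(time.Since(lastUpdate).Seconds()) / 10
    if multiplier > 6 {
        multiplier = 6
    }
    return base * (1 << uint(multiplier))
}

func main() {
    base := uint64(16) // new default in this PR; previously 32
    fmt.Println(requestCount(base, time.Now()))                      // 16: no backoff yet
    fmt.Println(requestCount(base, time.Now().Add(-45*time.Second))) // 256: 16 << 4
    fmt.Println(requestCount(base, time.Now().Add(-10*time.Minute))) // 1024: capped at 16 << 6
}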