diff --git a/.github/workflows/ci-cd-main-branch-docker-images.yml b/.github/workflows/ci-cd-main-branch-docker-images.yml
index e5e39ad1932..588a3f7f904 100644
--- a/.github/workflows/ci-cd-main-branch-docker-images.yml
+++ b/.github/workflows/ci-cd-main-branch-docker-images.yml
@@ -3,7 +3,7 @@ run-name: "Commit id ${{ github.sha }}: CI-CD build and deploy docker images bas
env:
APPLICATION: "erigon"
- BUILDER_IMAGE: "golang:1.23-alpine"
+ BUILDER_IMAGE: "golang:1.24-alpine"
TARGET_BASE_IMAGE: "alpine:3.21"
APP_REPO: "erigontech/erigon"
CHECKOUT_REF: "main"
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 43d9be738a3..cc2151c36b0 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -42,7 +42,7 @@ jobs:
- uses: actions/setup-go@v5
with:
- go-version: '1.22'
+ go-version: '1.23'
cache: ${{ contains(fromJSON('[
"refs/heads/release/2.60",
"refs/heads/release/2.61",
@@ -93,7 +93,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
- go-version: '1.22'
+ go-version: '1.23'
cache: ${{ contains(fromJSON('[
"refs/heads/release/2.60",
"refs/heads/release/2.61",
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 00047b10bdf..3df3eeec3fc 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -26,13 +26,13 @@ jobs:
fetch-depth: 0
- uses: actions/setup-go@v5
with:
- go-version: '1.23'
+ go-version: '1.24'
- name: Install golangci-lint
if: runner.os == 'Linux'
uses: golangci/golangci-lint-action@v6
with:
- version: v1.63.4
+ version: v1.64.6
skip-cache: true
- name: Lint
diff --git a/.github/workflows/qa-rpc-integration-tests.yml b/.github/workflows/qa-rpc-integration-tests.yml
index e34ace66eef..80afe198468 100644
--- a/.github/workflows/qa-rpc-integration-tests.yml
+++ b/.github/workflows/qa-rpc-integration-tests.yml
@@ -18,7 +18,7 @@ on:
jobs:
integration-test-suite:
- runs-on: [ self-hosted, Erigon3 ]
+ runs-on: [ self-hosted, RpcSpecific ]
timeout-minutes: 15
env:
ERIGON_REFERENCE_DATA_DIR: /opt/erigon-versions/reference-version/datadir
@@ -34,7 +34,7 @@ jobs:
- name: Checkout RPC Tests Repository & Install Requirements
run: |
rm -rf ${{ runner.workspace }}/rpc-tests
- git -c advice.detachedHead=false clone --depth 1 --branch v1.46.0 https://github.com/erigontech/rpc-tests ${{runner.workspace}}/rpc-tests
+ git -c advice.detachedHead=false clone --depth 1 --branch v1.49.0 https://github.com/erigontech/rpc-tests ${{runner.workspace}}/rpc-tests
cd ${{ runner.workspace }}/rpc-tests
pip3 install -r requirements.txt
diff --git a/.github/workflows/qa-rpc-performance-tests.yml b/.github/workflows/qa-rpc-performance-tests.yml
index fdf35bffee6..65113d23a3d 100644
--- a/.github/workflows/qa-rpc-performance-tests.yml
+++ b/.github/workflows/qa-rpc-performance-tests.yml
@@ -14,7 +14,7 @@ jobs:
matrix:
include:
- chain: mainnet
- backend: Erigon3
+ backend: RpcSpecific
#- chain: bor-mainnet
# backend: Polygon
runs-on: [ self-hosted, "${{ matrix.backend }}" ]
diff --git a/.github/workflows/test-erigon-is-library.yml b/.github/workflows/test-erigon-is-library.yml
index e165dca93b7..507cc99767e 100644
--- a/.github/workflows/test-erigon-is-library.yml
+++ b/.github/workflows/test-erigon-is-library.yml
@@ -20,7 +20,7 @@ jobs:
- run: git submodule update --init --recursive --force
- uses: actions/setup-go@v5
with:
- go-version: '1.22'
+ go-version: '1.23'
- name: Install dependencies on Linux
if: runner.os == 'Linux'
run: sudo apt update && sudo apt install build-essential
diff --git a/.github/workflows/test-hive-eest.yml b/.github/workflows/test-hive-eest.yml
index 1f36f20e9cf..fe88a0225d1 100644
--- a/.github/workflows/test-hive-eest.yml
+++ b/.github/workflows/test-hive-eest.yml
@@ -20,7 +20,7 @@ jobs:
- name: Setup go env and cache
uses: actions/setup-go@v5
with:
- go-version: '>=1.22'
+ go-version: '>=1.23'
go-version-file: 'hive/go.mod'
# Targetting the clients/erigon/Dockerfile.git in the Hive director -
diff --git a/.github/workflows/test-hive.yml b/.github/workflows/test-hive.yml
index 4f5ec79bacf..5bf57bdc357 100644
--- a/.github/workflows/test-hive.yml
+++ b/.github/workflows/test-hive.yml
@@ -24,7 +24,7 @@ jobs:
- name: Setup go env and cache
uses: actions/setup-go@v5
with:
- go-version: '>=1.22'
+ go-version: '>=1.23'
go-version-file: 'hive/go.mod'
# Targetting the clients/erigon/Dockerfile.git in the Hive director -
diff --git a/.github/workflows/test-integration-caplin.yml b/.github/workflows/test-integration-caplin.yml
index c8dbfd285d6..d0a74671f75 100644
--- a/.github/workflows/test-integration-caplin.yml
+++ b/.github/workflows/test-integration-caplin.yml
@@ -26,7 +26,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
- go-version: '1.22'
+ go-version: '1.23'
cache: ${{ contains(fromJSON('[
"refs/heads/release/2.60",
"refs/heads/release/2.61",
@@ -50,7 +50,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
- go-version: '1.22'
+ go-version: '1.23'
cache: ${{ contains(fromJSON('[
"refs/heads/release/2.60",
"refs/heads/release/2.61",
diff --git a/ChangeLog.md b/ChangeLog.md
index cea63061f1d..79f468ee93d 100644
--- a/ChangeLog.md
+++ b/ChangeLog.md
@@ -1,7 +1,64 @@
ChangeLog
---------
-## v3.0.0-beta2 (in development)
+## v3.1.0 (in development)
+
+**Improvements:**
+
+TODO
+
+**Bugfixes:**
+
+TODO
+
+### TODO
+
+- milestones:
+https://github.com/erigontech/erigon/milestone/31
+
+
+## v3.0.0 (in development)
+
+
+### Milestone
+
+https://github.com/erigontech/erigon/milestone/30
+
+## v3.0.0-rc2
+
+**Bugfixes:**
+
+- Caplin: error on aggregation_bit merge by @domiwei in https://github.com/erigontech/erigon/pull/14063
+- Pectra: fix bad deposit contract deposit unmarshalling by @Giulio2002 in https://github.com/erigontech/erigon/pull/14068
+
+### Milestone
+
+https://github.com/erigontech/erigon/milestone/36
+
+## v3.0.0-rc1
+
+**Improvements:**
+
+- Schedule Pectra for Chiado by @yperbasis in https://github.com/erigontech/erigon/pull/13898
+- stagedsync: dbg option to log receipts on receipts hash mismatch (#13905) by @taratorio in https://github.com/erigontech/erigon/pull/13940
+- Introduces a new method for estimating transaction gas that targets the maximum gas a contract could use (#13913). Fixes eth_estimateGas for historical blocks (#13903) by @somnathb1 in https://github.com/erigontech/erigon/pull/13916
+
+**Bugfixes:**
+
+- rpcdaemon: Show state sync transactions in eth_getLogs (#13924) by @shohamc1 in https://github.com/erigontech/erigon/pull/13951
+- polygon/heimdall: fix snapshot store last entity to check in snapshots too (#13845) by @taratorio in https://github.com/erigontech/erigon/pull/13938
+- Implemented wait if heimdall is not synced to the chain (#13807) by @taratorio in https://github.com/erigontech/erigon/pull/13939
+
+**Known Problems:**
+
+- polygon: `eth_getLogs` doesn't return state-sync events when searching by filters (state-sync events are not indexed yet). Without filters, state-sync events are visible; they also appear in `eth_getReceipts`. [Will](https://github.com/erigontech/erigon/issues/14003) release fixed files in E3.1
+- polygon: `eth_getLogs` state-sync events have an incorrect `index` field. [Will](https://github.com/erigontech/erigon/issues/14003) release fixed files in E3.1
+
+### Milestone
+
+https://github.com/erigontech/erigon/milestone/34
+
+## v3.0.0-beta2
### Breaking changes
- Reverts Optimize gas by default in eth_createAccessList #8337
diff --git a/Dockerfile b/Dockerfile
index 13081471d7f..d9786457a49 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,5 +1,5 @@
# syntax = docker/dockerfile:1.2
-FROM docker.io/library/golang:1.22.12-alpine3.20 AS builder
+FROM docker.io/library/golang:1.24.1-alpine3.20 AS builder
RUN apk --no-cache add build-base linux-headers git bash ca-certificates libstdc++
@@ -18,7 +18,7 @@ RUN --mount=type=cache,target=/root/.cache \
make BUILD_TAGS=nosqlite,noboltdb,nosilkworm all
-FROM docker.io/library/golang:1.22.12-alpine3.20 AS tools-builder
+FROM docker.io/library/golang:1.24.1-alpine3.20 AS tools-builder
RUN apk --no-cache add build-base linux-headers git bash ca-certificates libstdc++
WORKDIR /app
@@ -36,7 +36,7 @@ RUN --mount=type=cache,target=/root/.cache \
--mount=type=cache,target=/go/pkg/mod \
make db-tools
-FROM docker.io/library/alpine:3.19
+FROM docker.io/library/alpine:3.20
# install required runtime libs, along with some helpers for debugging
RUN apk add --no-cache ca-certificates libstdc++ tzdata
diff --git a/Dockerfile.debian b/Dockerfile.debian
index 4f4841def0d..1a2e953efdb 100644
--- a/Dockerfile.debian
+++ b/Dockerfile.debian
@@ -1,5 +1,5 @@
# syntax = docker/dockerfile:1.2
-FROM docker.io/library/golang:1.21-bullseye AS builder
+FROM docker.io/library/golang:1.24-bullseye AS builder
RUN apt update
RUN apt install -y build-essential git bash ca-certificates libstdc++6
@@ -17,7 +17,7 @@ RUN --mount=type=cache,target=/root/.cache \
make all
-FROM docker.io/library/golang:1.21-alpine3.17 AS tools-builder
+FROM docker.io/library/golang:1.24-alpine3.20 AS tools-builder
RUN apk --no-cache add build-base linux-headers git bash ca-certificates libstdc++
WORKDIR /app
diff --git a/Dockerfile.release b/Dockerfile.release
index 5f89d78a40c..a330cd48364 100644
--- a/Dockerfile.release
+++ b/Dockerfile.release
@@ -1,5 +1,5 @@
ARG RELEASE_DOCKER_BASE_IMAGE="debian:12.8-slim" \
- CI_CD_MAIN_BUILDER_IMAGE="golang:1.22-bookworm" \
+ CI_CD_MAIN_BUILDER_IMAGE="golang:1.24-bookworm" \
CI_CD_MAIN_TARGET_BASE_IMAGE="alpine:3" \
UID_ERIGON=1000 \
GID_ERIGON=1000 \
diff --git a/README.md b/README.md
index 66b1ca8c6f1..713e0d0f45f 100644
--- a/README.md
+++ b/README.md
@@ -80,7 +80,7 @@ Set `--prune.mode` to "archive" if you need an archive node or to "minimal" if y
System Requirements
===================
-RAM: >=32GB, [Golang >= 1.22](https://golang.org/doc/install); GCC 10+ or Clang; On Linux: kernel > v4. 64-bit
+RAM: >=32GB, [Golang >= 1.23](https://golang.org/doc/install); GCC 10+ or Clang; On Linux: kernel > v4. 64-bit
architecture.
- ArchiveNode Ethereum Mainnet: 2TB (April 2024). FullNode: 1.1TB (June 2024)
@@ -680,7 +680,7 @@ Windows users may run erigon in 3 possible ways:
build on windows :
* [Git](https://git-scm.com/downloads) for Windows must be installed. If you're cloning this repository is very
likely you already have it
- * [GO Programming Language](https://golang.org/dl/) must be installed. Minimum required version is 1.22
+ * [GO Programming Language](https://golang.org/dl/) must be installed. Minimum required version is 1.23
* GNU CC Compiler at least version 13 (is highly suggested that you install `chocolatey` package manager - see
following point)
* If you need to build MDBX tools (i.e. `.\wmake.ps1 db-tools`)
diff --git a/cl/aggregation/pool_test.go b/cl/aggregation/pool_test.go
index 6b294b8cd93..7c2f835ccf1 100644
--- a/cl/aggregation/pool_test.go
+++ b/cl/aggregation/pool_test.go
@@ -121,7 +121,7 @@ func (t *PoolTestSuite) TestAddAttestationElectra() {
CommitteeBits: cBits1,
}
att2 := &solid.Attestation{
- AggregationBits: solid.BitlistFromBytes([]byte{0b00001100}, 2048*64),
+ AggregationBits: solid.BitlistFromBytes([]byte{0b00001101}, 2048*64),
Data: attData1,
Signature: [96]byte{'d', 'e', 'f', 'g', 'h', 'i'},
CommitteeBits: cBits2,
diff --git a/cl/beacon/synced_data/mock_services/synced_data_mock.go b/cl/beacon/synced_data/mock_services/synced_data_mock.go
index 50993f29a7e..b6c60cccd47 100644
--- a/cl/beacon/synced_data/mock_services/synced_data_mock.go
+++ b/cl/beacon/synced_data/mock_services/synced_data_mock.go
@@ -465,17 +465,17 @@ func (c *MockSyncedDataViewHeadStateCall) DoAndReturn(f func(synced_data.ViewHea
}
// ViewPreviousHeadState mocks base method.
-func (m *MockSyncedData) ViewPreviousHeadState(arg0 synced_data.ViewHeadStateFn) error {
+func (m *MockSyncedData) ViewPreviousHeadState(fn synced_data.ViewHeadStateFn) error {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "ViewPreviousHeadState", arg0)
+ ret := m.ctrl.Call(m, "ViewPreviousHeadState", fn)
ret0, _ := ret[0].(error)
return ret0
}
// ViewPreviousHeadState indicates an expected call of ViewPreviousHeadState.
-func (mr *MockSyncedDataMockRecorder) ViewPreviousHeadState(arg0 any) *MockSyncedDataViewPreviousHeadStateCall {
+func (mr *MockSyncedDataMockRecorder) ViewPreviousHeadState(fn any) *MockSyncedDataViewPreviousHeadStateCall {
mr.mock.ctrl.T.Helper()
- call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ViewPreviousHeadState", reflect.TypeOf((*MockSyncedData)(nil).ViewPreviousHeadState), arg0)
+ call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ViewPreviousHeadState", reflect.TypeOf((*MockSyncedData)(nil).ViewPreviousHeadState), fn)
return &MockSyncedDataViewPreviousHeadStateCall{Call: call}
}
diff --git a/cl/cltypes/solid/bitlist.go b/cl/cltypes/solid/bitlist.go
index 9c0d9755bd0..ec743e56313 100644
--- a/cl/cltypes/solid/bitlist.go
+++ b/cl/cltypes/solid/bitlist.go
@@ -140,6 +140,7 @@ func (u *BitList) removeMsb() {
// addMsb adds a most significant bit to the list, but doesn't change the length l.
func (u *BitList) addMsb() int {
byteLen := len(u.u)
+ found := false
for i := len(u.u) - 1; i >= 0; i-- {
if u.u[i] != 0 {
msb := bits.Len8(u.u[i])
@@ -152,10 +153,16 @@ func (u *BitList) addMsb() int {
} else {
u.u[i] |= 1 << uint(msb)
}
+ found = true
break
}
byteLen--
}
+ if !found {
+ u.u[0] = 1
+ byteLen = 1
+ }
+ u.l = byteLen
return byteLen
}
@@ -267,7 +274,7 @@ func (u *BitList) Merge(other *BitList) (*BitList, error) {
}
// copy by the longer one
var ret, unionFrom *BitList
- if u.l < other.l {
+ if u.Bits() < other.Bits() {
ret = other.Copy()
unionFrom = u
} else {
@@ -276,13 +283,10 @@ func (u *BitList) Merge(other *BitList) (*BitList, error) {
}
// union
unionFrom.removeMsb()
- ret.removeMsb()
for i := 0; i < unionFrom.l; i++ {
ret.u[i] |= unionFrom.u[i]
}
unionFrom.addMsb()
- byteLen := ret.addMsb()
- ret.l = byteLen
return ret, nil
}
diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go
index 81c44355106..353d6dcb3aa 100644
--- a/cmd/integration/commands/stages.go
+++ b/cmd/integration/commands/stages.go
@@ -932,7 +932,7 @@ func stagePolygonSync(db kv.TemporalRwDB, ctx context.Context, logger log.Logger
stageState := stage(stageSync, tx, nil, stages.PolygonSync)
cfg := stagedsync.NewPolygonSyncStageCfg(ðconfig.Defaults, logger, chainConfig, nil, heimdallClient,
- heimdallStore, bridgeStore, nil, 0, nil, blockReader, nil, 0, unwindTypes, nil /* notifications */, nil)
+ heimdallStore, bridgeStore, nil, 0, nil, blockReader, nil, 0, unwindTypes, nil /* notifications */, nil, nil)
// we only need blockReader and blockWriter (blockWriter is constructed in NewPolygonSyncStageCfg)
if unwind > 0 {
u := stageSync.NewUnwindState(stageState.ID, stageState.BlockNumber-unwind, stageState.BlockNumber, true, false)
@@ -1591,6 +1591,7 @@ func newSync(ctx context.Context, db kv.TemporalRwDB, miningConfig *params.Minin
stagedsync.StageSendersCfg(db, sentryControlServer.ChainConfig, cfg.Sync, false, dirs.Tmp, cfg.Prune, blockReader, sentryControlServer.Hd),
stagedsync.StageMiningExecCfg(db, miner, events, *chainConfig, engine, &vm.Config{}, dirs.Tmp, nil, 0, nil, blockReader),
stagedsync.StageMiningFinishCfg(db, *chainConfig, engine, miner, miningCancel, blockReader, builder.NewLatestBlockBuiltStore()),
+ false,
),
stagedsync.MiningUnwindOrder,
stagedsync.MiningPruneOrder,
diff --git a/cmd/observer/database/db_retrier.go b/cmd/observer/database/db_retrier.go
index 835932da86f..c584dd22ccf 100644
--- a/cmd/observer/database/db_retrier.go
+++ b/cmd/observer/database/db_retrier.go
@@ -18,7 +18,7 @@ package database
import (
"context"
- "math/rand"
+ "math/rand/v2"
"time"
"github.com/erigontech/erigon-lib/log/v3"
@@ -38,7 +38,8 @@ func retryBackoffTime(attempt int) time.Duration {
if attempt <= 0 {
return 0
}
- jitter := rand.Int63n(30 * time.Millisecond.Nanoseconds() * int64(attempt)) // nolint: gosec
+
+ jitter := rand.Int64N(30 * time.Millisecond.Nanoseconds() * int64(attempt)) // nolint: gosec
var ns int64
if attempt <= 6 {
ns = ((50 * time.Millisecond.Nanoseconds()) << (attempt - 1)) + jitter
diff --git a/cmd/state/exec3/historical_trace_worker.go b/cmd/state/exec3/historical_trace_worker.go
index e3522e91c80..b4b1be0b838 100644
--- a/cmd/state/exec3/historical_trace_worker.go
+++ b/cmd/state/exec3/historical_trace_worker.go
@@ -23,10 +23,10 @@ import (
"sync/atomic"
"time"
- "github.com/erigontech/erigon-lib/chain/networkname"
"golang.org/x/sync/errgroup"
"github.com/erigontech/erigon-lib/chain"
+ "github.com/erigontech/erigon-lib/chain/networkname"
"github.com/erigontech/erigon-lib/common"
"github.com/erigontech/erigon-lib/common/datadir"
"github.com/erigontech/erigon-lib/common/dbg"
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 319b939d97b..e2a6a4ccc4a 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -1724,7 +1724,7 @@ func setBorConfig(ctx *cli.Context, cfg *ethconfig.Config, nodeConfig *nodecfg.C
heimdall.RecordWayPoints(cfg.WithHeimdallWaypointRecording || cfg.PolygonSync || cfg.PolygonSyncStage)
chainConfig := params.ChainConfigByChainName(ctx.String(ChainFlag.Name))
- if chainConfig.Bor != nil && !ctx.IsSet(MaxPeersFlag.Name) {
+ if chainConfig != nil && chainConfig.Bor != nil && !ctx.IsSet(MaxPeersFlag.Name) {
// override default max devp2p peers for polygon as per
// https://forum.polygon.technology/t/introducing-our-new-dns-discovery-for-polygon-pos-faster-smarter-more-connected/19871
// which encourages high peer count
diff --git a/consensus/misc/eip6110.go b/consensus/misc/eip6110.go
index f516fcb1164..e4969cee8d1 100644
--- a/consensus/misc/eip6110.go
+++ b/consensus/misc/eip6110.go
@@ -29,8 +29,11 @@ const (
BLSPubKeyLen = 48
WithdrawalCredentialsLen = 32 // withdrawalCredentials size
BLSSigLen = 96 // signature size
+
)
+var depositTopic = libcommon.HexToHash("0x649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5")
+
var (
// DepositABI is an ABI instance of beacon chain deposit events.
DepositABI = abi.ABI{Events: map[string]abi.Event{"DepositEvent": depositEvent}}
@@ -77,7 +80,7 @@ func ParseDepositLogs(logs []*types.Log, depositContractAddress libcommon.Addres
}
reqData := make([]byte, 0, len(logs)*types.DepositRequestDataLen)
for _, l := range logs {
- if l.Address == depositContractAddress {
+ if l.Address == depositContractAddress && len(l.Topics) > 0 && l.Topics[0] == depositTopic {
d, err := unpackDepositLog(l.Data)
if err != nil {
return nil, fmt.Errorf("unable to parse deposit data: %v", err)
diff --git a/core/snaptype/headers_freezer.go b/core/snaptype/headers_freezer.go
new file mode 100644
index 00000000000..a67a66d84ca
--- /dev/null
+++ b/core/snaptype/headers_freezer.go
@@ -0,0 +1,109 @@
+package snaptype
+
+import (
+ "context"
+ "encoding/binary"
+ "fmt"
+ "runtime"
+ "time"
+
+ "github.com/erigontech/erigon-lib/common"
+ common2 "github.com/erigontech/erigon-lib/common"
+ "github.com/erigontech/erigon-lib/common/dbg"
+ "github.com/erigontech/erigon-lib/common/hexutil"
+ "github.com/erigontech/erigon-lib/crypto"
+ "github.com/erigontech/erigon-lib/crypto/cryptopool"
+ "github.com/erigontech/erigon-lib/kv"
+ "github.com/erigontech/erigon-lib/log/v3"
+
+ "github.com/erigontech/erigon-lib/rlp"
+ "github.com/erigontech/erigon-lib/state"
+ ee "github.com/erigontech/erigon-lib/state/appendable_extras"
+ "github.com/erigontech/erigon/core/types"
+)
+
+type HeaderFreezer struct {
+ canonicalTbl, valsTbl string
+ coll state.Collector
+ logger log.Logger
+}
+
+var _ state.Freezer = (*HeaderFreezer)(nil)
+
+func NewHeaderFreezer(canonicalTbl, valsTbl string, logger log.Logger) *HeaderFreezer {
+ return &HeaderFreezer{canonicalTbl, valsTbl, nil, logger}
+}
+
+func (f *HeaderFreezer) Freeze(ctx context.Context, blockFrom, blockTo ee.RootNum, db kv.RoDB) error {
+ logEvery := time.NewTicker(20 * time.Second)
+ defer logEvery.Stop()
+
+ key := make([]byte, 8+32)
+ from := hexutil.EncodeTs(uint64(blockFrom))
+ return kv.BigChunks(db, f.canonicalTbl, from, func(tx kv.Tx, k, v []byte) (bool, error) {
+ blockNum := binary.BigEndian.Uint64(k)
+ if blockNum >= uint64(blockTo) {
+ return false, nil
+ }
+ copy(key, k)
+ copy(key[8:], v)
+ dataRLP, err := tx.GetOne(f.valsTbl, key)
+ if err != nil {
+ return false, err
+ }
+ if dataRLP == nil {
+ return false, fmt.Errorf("header missed in db: block_num=%d, hash=%x", blockNum, v)
+ }
+ h := types.Header{}
+ if err := rlp.DecodeBytes(dataRLP, &h); err != nil {
+ return false, err
+ }
+
+ value := make([]byte, len(dataRLP)+1) // first_byte_of_header_hash + header_rlp
+ value[0] = h.Hash()[0]
+ copy(value[1:], dataRLP)
+ if err := f.coll(value); err != nil {
+ return false, err
+ }
+
+ select {
+ case <-ctx.Done():
+ return false, ctx.Err()
+ case <-logEvery.C:
+ var m runtime.MemStats
+ dbg.ReadMemStats(&m)
+ f.logger.Info("[snapshots] Dumping headers", "block num", blockNum,
+ "alloc", common2.ByteCount(m.Alloc), "sys", common2.ByteCount(m.Sys),
+ )
+ default:
+ }
+ return true, nil
+ })
+}
+
+func (f *HeaderFreezer) SetCollector(coll state.Collector) {
+ f.coll = coll
+}
+
+var _ state.IndexKeyFactory = (*HeaderAccessorIndexKeyFactory)(nil)
+
+type HeaderAccessorIndexKeyFactory struct {
+ s crypto.KeccakState
+ h common.Hash
+}
+
+func (f *HeaderAccessorIndexKeyFactory) Refresh() {
+ f.s = crypto.NewKeccakState()
+}
+
+func (f *HeaderAccessorIndexKeyFactory) Make(word []byte, _ uint64) []byte {
+ headerRlp := word[1:]
+ f.s.Reset()
+ f.s.Write(headerRlp)
+ f.s.Read(f.h[:])
+ return f.h[:]
+}
+
+func (f *HeaderAccessorIndexKeyFactory) Close() {
+ cryptopool.ReturnToPoolKeccak256(f.s)
+}
diff --git a/core/test/appendable_test.go b/core/test/appendable_test.go
new file mode 100644
index 00000000000..28dcce1b1ca
--- /dev/null
+++ b/core/test/appendable_test.go
@@ -0,0 +1,330 @@
+package test
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "math/big"
+ "os"
+ "testing"
+
+ "github.com/c2h5oh/datasize"
+ "github.com/erigontech/erigon-lib/chain/snapcfg"
+ "github.com/erigontech/erigon-lib/common"
+ "github.com/erigontech/erigon-lib/common/background"
+ "github.com/erigontech/erigon-lib/common/datadir"
+ "github.com/erigontech/erigon-lib/kv"
+ "github.com/erigontech/erigon-lib/kv/mdbx"
+ "github.com/erigontech/erigon-lib/log/v3"
+ "github.com/erigontech/erigon-lib/state"
+ ae "github.com/erigontech/erigon-lib/state/appendable_extras"
+ "github.com/erigontech/erigon/core/snaptype"
+ "github.com/erigontech/erigon/core/types"
+ "github.com/stretchr/testify/require"
+)
+
+type Num = state.Num
+type RootNum = state.RootNum
+type EntityId = ae.AppendableId
+
+// test marked appendable
+func TestMarkedAppendableRegistration(t *testing.T) {
+ // just registration goes fine
+ t.Cleanup(func() {
+ ae.Cleanup()
+ })
+ dirs := datadir.New(t.TempDir())
+ blockId := registerEntity(dirs, "blocks")
+ require.Equal(t, ae.AppendableId(0), blockId)
+ headerId := registerEntity(dirs, "headers")
+ require.Equal(t, ae.AppendableId(1), headerId)
+}
+
+func registerEntity(dirs datadir.Dirs, name string) ae.AppendableId {
+ preverified := snapcfg.Mainnet
+ return ae.RegisterAppendable(name, dirs, preverified, ae.WithSnapshotCreationConfig(
+ &ae.SnapshotConfig{
+ SnapshotCreationConfig: &ae.SnapshotCreationConfig{
+ EntitiesPerStep: 1000,
+ MergeStages: []uint64{1000, 20000, 600000},
+ MinimumSize: 1000000,
+ SafetyMargin: 1000,
+ },
+ },
+ ))
+}
+
+func registerEntityRelaxed(dirs datadir.Dirs, name string) ae.AppendableId {
+ return ae.RegisterAppendable(name, dirs, nil, ae.WithSnapshotCreationConfig(
+ &ae.SnapshotConfig{
+ SnapshotCreationConfig: &ae.SnapshotCreationConfig{
+ EntitiesPerStep: 10,
+ MergeStages: []uint64{20, 40},
+ MinimumSize: 10,
+ SafetyMargin: 5,
+ },
+ },
+ ))
+}
+
+func setup(tb testing.TB) (datadir.Dirs, kv.RwDB, log.Logger) {
+ tb.Helper()
+ logger := log.New()
+ dirs := datadir.New(tb.TempDir())
+ db := mdbx.New(kv.ChainDB, logger).InMem(dirs.Chaindata).GrowthStep(32 * datasize.MB).MapSize(2 * datasize.GB).MustOpen()
+ return dirs, db, logger
+}
+
+func setupHeader(t *testing.T, log log.Logger, dir datadir.Dirs, db kv.RoDB) (EntityId, *state.Appendable[state.MarkedTxI]) {
+ headerId := registerEntityRelaxed(dir, "headers")
+ require.Equal(t, ae.AppendableId(0), headerId)
+
+ // create marked appendable
+ freezer := snaptype.NewHeaderFreezer(kv.HeaderCanonical, kv.Headers, log)
+
+ builder := state.NewSimpleAccessorBuilder(state.NewAccessorArgs(true, true), headerId, log,
+ state.WithIndexKeyFactory(&snaptype.HeaderAccessorIndexKeyFactory{}))
+
+ ma, err := state.NewMarkedAppendable(headerId, kv.Headers, kv.HeaderCanonical, ae.IdentityRootRelationInstance, log,
+ state.App_WithFreezer[state.MarkedTxI](freezer),
+ state.App_WithPruneFrom[state.MarkedTxI](Num(1)),
+ state.App_WithIndexBuilders[state.MarkedTxI](builder),
+ )
+ require.NoError(t, err)
+
+ t.Cleanup(func() {
+ ma.Close()
+ ma.RecalcVisibleFiles(0)
+ //ma = nil
+
+ ae.Cleanup()
+ db.Close()
+ os.RemoveAll(dir.Snap)
+ os.RemoveAll(dir.Chaindata)
+ })
+
+ return headerId, ma
+}
+
+func TestMarked_PutToDb(t *testing.T) {
+ /*
+ put, get, getnc on db-only data
+ */
+ dir, db, log := setup(t)
+ _, ma := setupHeader(t, log, dir, db)
+
+ ma_tx := ma.BeginFilesRo()
+ defer ma_tx.Close()
+ rwtx, err := db.BeginRw(context.Background())
+ defer rwtx.Rollback()
+ require.NoError(t, err)
+
+ num := Num(1)
+ hash := common.HexToHash("0x1234").Bytes()
+ value := []byte{1, 2, 3, 4, 5}
+
+ err = ma_tx.Put(num, hash, value, rwtx)
+ require.NoError(t, err)
+ returnv, snap, err := ma_tx.Get(num, rwtx.(kv.Tx))
+ require.NoError(t, err)
+ require.Equal(t, value, returnv)
+ require.False(t, snap)
+
+ returnv, err = ma_tx.GetNc(num, hash, rwtx.(kv.Tx))
+ require.NoError(t, err)
+ require.Equal(t, value, returnv)
+
+ returnv, err = ma_tx.GetNc(num, []byte{1}, rwtx.(kv.Tx))
+ require.NoError(t, err)
+ require.True(t, returnv == nil) // Equal fails
+
+ require.Equal(t, ma_tx.Type(), state.Marked)
+}
+
+func TestPrune(t *testing.T) {
+ // prune
+ for pruneTo := RootNum(0); ; pruneTo++ {
+ var entries_count uint64
+ t.Run(fmt.Sprintf("prune to %d", pruneTo), func(t *testing.T) {
+ dir, db, log := setup(t)
+ headerId, ma := setupHeader(t, log, dir, db)
+
+ ctx := context.Background()
+ cfg := headerId.SnapshotConfig()
+ entries_count = cfg.MinimumSize + cfg.SafetyMargin + /** in db **/ 5
+
+ ma_tx := ma.BeginFilesRo()
+ defer ma_tx.Close()
+ rwtx, err := db.BeginRw(ctx)
+ defer rwtx.Rollback()
+ require.NoError(t, err)
+
+ buffer := &bytes.Buffer{}
+
+ getData := func(i int) (num Num, hash []byte, value []byte) {
+ header := &types.Header{
+ Number: big.NewInt(int64(i)),
+ Extra: []byte("test header"),
+ }
+ buffer.Reset()
+ err = header.EncodeRLP(buffer)
+ require.NoError(t, err)
+
+ return Num(i), header.Hash().Bytes(), buffer.Bytes()
+ }
+
+ for i := range int(entries_count) {
+ num, hash, value := getData(i)
+ err = ma_tx.Put(num, hash, value, rwtx)
+ require.NoError(t, err)
+ }
+
+ require.NoError(t, rwtx.Commit())
+ ma_tx.Close()
+
+ rwtx, err = db.BeginRw(ctx)
+ defer rwtx.Rollback()
+ require.NoError(t, err)
+
+ del, err := ma_tx.Prune(ctx, pruneTo, 1000, rwtx)
+ require.NoError(t, err)
+ cfgPruneFrom := int64(ma.PruneFrom())
+ require.Equal(t, int64(del), max(0, min(int64(pruneTo), int64(entries_count))-cfgPruneFrom))
+
+ require.NoError(t, rwtx.Commit())
+ ma_tx = ma.BeginFilesRo()
+ defer ma_tx.Close()
+ rwtx, err = db.BeginRw(ctx)
+ require.NoError(t, err)
+ defer rwtx.Rollback()
+ })
+ if pruneTo >= RootNum(entries_count+1) {
+ break
+ }
+ }
+
+}
+func TestUnwind(t *testing.T) {
+ // unwind
+}
+
+func TestBuildFiles(t *testing.T) {
+ // put stuff until build files is called
+ // and snapshot is created (with indexes)
+ // check beginfilesro works with new visible file
+
+ // then check get
+ // then prune and check get
+ // then unwind and check get
+
+ dir, db, log := setup(t)
+ headerId, ma := setupHeader(t, log, dir, db)
+ ctx := context.Background()
+
+ ma_tx := ma.BeginFilesRo()
+ defer ma_tx.Close()
+ rwtx, err := db.BeginRw(ctx)
+ defer rwtx.Rollback()
+ require.NoError(t, err)
+ cfg := headerId.SnapshotConfig()
+ entries_count := cfg.MinimumSize + cfg.SafetyMargin + /** in db **/ 2
+ buffer := &bytes.Buffer{}
+
+ getData := func(i int) (num Num, hash []byte, value []byte) {
+ header := &types.Header{
+ Number: big.NewInt(int64(i)),
+ Extra: []byte("test header"),
+ }
+ buffer.Reset()
+ err = header.EncodeRLP(buffer)
+ require.NoError(t, err)
+
+ return Num(i), header.Hash().Bytes(), buffer.Bytes()
+ }
+
+ for i := range int(entries_count) {
+ num, hash, value := getData(i)
+ err = ma_tx.Put(num, hash, value, rwtx)
+ require.NoError(t, err)
+ }
+
+ require.NoError(t, rwtx.Commit())
+ ma_tx.Close()
+
+ ps := background.NewProgressSet()
+ files, err := ma.BuildFiles(ctx, 0, RootNum(entries_count), db, ps)
+ require.NoError(t, err)
+ require.True(t, len(files) == 1) // 1 snapshot made
+
+ ma.IntegrateDirtyFiles(files)
+ ma.RecalcVisibleFiles(RootNum(entries_count))
+
+ ma_tx = ma.BeginFilesRo()
+ defer ma_tx.Close()
+
+ rwtx, err = db.BeginRw(ctx)
+ defer rwtx.Rollback()
+ require.NoError(t, err)
+
+ firstRootNumNotInSnap := ma_tx.VisibleFilesMaxRootNum()
+ del, err := ma_tx.Prune(ctx, firstRootNumNotInSnap, 1000, rwtx)
+ require.NoError(t, err)
+ require.Equal(t, del, uint64(firstRootNumNotInSnap)-uint64(ma.PruneFrom()))
+
+ require.NoError(t, rwtx.Commit())
+ ma_tx = ma.BeginFilesRo()
+ defer ma_tx.Close()
+ rwtx, err = db.BeginRw(ctx)
+ require.NoError(t, err)
+ defer rwtx.Rollback()
+
+ // check unified interface
+ for i := range int(entries_count) {
+ num, hash, value := getData(i)
+ returnv, snap, err := ma_tx.Get(num, rwtx)
+ require.NoError(t, err)
+
+ require.True(t, snap == (num < Num(firstRootNumNotInSnap)))
+ require.Equal(t, value, returnv)
+
+ // just look in db....
+ if num < ma.PruneFrom() || num >= Num(firstRootNumNotInSnap) {
+ // these should be in db
+ returnv, err = ma_tx.GetNc(num, hash, rwtx)
+ require.NoError(t, err)
+ require.Equal(t, value, returnv)
+
+ value, err = rwtx.GetOne(kv.HeaderCanonical, num.EncTo8Bytes())
+ require.NoError(t, err)
+ require.Equal(t, value, hash)
+ found := false
+
+ rwtx.ForAmount(kv.Headers, num.EncTo8Bytes(), 1, func(k, v []byte) error {
+ require.Equal(t, k[:8], num.EncTo8Bytes())
+ found = true
+ require.Equal(t, k[8:], hash)
+ return nil
+ })
+
+ require.True(t, found, "value expected to be found in db")
+ } else {
+ // these should not be in db (pruned)
+ value, err = rwtx.GetOne(kv.HeaderCanonical, num.EncTo8Bytes())
+ require.NoError(t, err)
+ require.True(t, value == nil)
+
+ rwtx.ForAmount(kv.Headers, num.EncTo8Bytes(), 1, func(k, v []byte) error {
+ if !bytes.Equal(k[:8], num.EncTo8Bytes()) {
+ return nil
+ }
+ require.Fail(t, "should not be in db")
+ return nil
+ })
+ }
+ }
+}
+
+func TestMerging(t *testing.T) {
+ // keep stuffing data until things merge
+ // begin files ro should give "right" files.
+}
diff --git a/core/types/blob_test_util.go b/core/types/blob_test_util.go
new file mode 100644
index 00000000000..42e65965768
--- /dev/null
+++ b/core/types/blob_test_util.go
@@ -0,0 +1,120 @@
+// Copyright 2025 The Erigon Authors
+// This file is part of Erigon.
+//
+// Erigon is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Erigon is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with Erigon. If not, see .
+
+package types
+
+import (
+ "fmt"
+
+ gokzg4844 "github.com/crate-crypto/go-kzg-4844"
+ "github.com/erigontech/erigon-lib/common"
+ "github.com/holiman/uint256"
+
+ "github.com/erigontech/erigon-lib/common/hexutil"
+ "github.com/erigontech/erigon-lib/crypto/kzg"
+ "github.com/erigontech/erigon/core/types/testdata"
+)
+
+func MakeBlobTxnRlp() ([]byte, []gokzg4844.KZGCommitment) {
+ bodyRlp := hexutil.MustDecodeHex(testdata.BodyRlpHex)
+
+ blobsRlpPrefix := hexutil.MustDecodeHex("fa040008")
+ blobRlpPrefix := hexutil.MustDecodeHex("ba020000")
+
+ var blob0, blob1 = gokzg4844.Blob{}, gokzg4844.Blob{}
+ copy(blob0[:], hexutil.MustDecodeHex(testdata.ValidBlob1Hex))
+ copy(blob1[:], hexutil.MustDecodeHex(testdata.ValidBlob2Hex))
+
+ var err error
+ proofsRlpPrefix := hexutil.MustDecodeHex("f862")
+ commitment0, _ := kzg.Ctx().BlobToKZGCommitment(blob0[:], 0)
+ commitment1, _ := kzg.Ctx().BlobToKZGCommitment(blob1[:], 0)
+ proof0, err := kzg.Ctx().ComputeBlobKZGProof(blob0[:], commitment0, 0)
+ if err != nil {
+ fmt.Println("error", err)
+ }
+ proof1, err := kzg.Ctx().ComputeBlobKZGProof(blob1[:], commitment1, 0)
+ if err != nil {
+ fmt.Println("error", err)
+ }
+
+ wrapperRlp := hexutil.MustDecodeHex("03fa0401fe")
+ wrapperRlp = append(wrapperRlp, bodyRlp...)
+ wrapperRlp = append(wrapperRlp, blobsRlpPrefix...)
+ wrapperRlp = append(wrapperRlp, blobRlpPrefix...)
+ wrapperRlp = append(wrapperRlp, blob0[:]...)
+ wrapperRlp = append(wrapperRlp, blobRlpPrefix...)
+ wrapperRlp = append(wrapperRlp, blob1[:]...)
+ wrapperRlp = append(wrapperRlp, proofsRlpPrefix...)
+ wrapperRlp = append(wrapperRlp, 0xb0)
+ wrapperRlp = append(wrapperRlp, commitment0[:]...)
+ wrapperRlp = append(wrapperRlp, 0xb0)
+ wrapperRlp = append(wrapperRlp, commitment1[:]...)
+ wrapperRlp = append(wrapperRlp, proofsRlpPrefix...)
+ wrapperRlp = append(wrapperRlp, 0xb0)
+ wrapperRlp = append(wrapperRlp, proof0[:]...)
+ wrapperRlp = append(wrapperRlp, 0xb0)
+ wrapperRlp = append(wrapperRlp, proof1[:]...)
+
+ return wrapperRlp, []gokzg4844.KZGCommitment{commitment0, commitment1}
+}
+
+func MakeWrappedBlobTxn(chainId *uint256.Int) *BlobTxWrapper {
+ wrappedTxn := BlobTxWrapper{}
+ wrappedTxn.Tx.To = &common.Address{129, 26, 117, 44, 140, 214, 151, 227, 203, 39, 39, 156, 51, 14, 209, 173, 167, 69, 168, 215}
+ wrappedTxn.Tx.Nonce = 0
+ wrappedTxn.Tx.GasLimit = 100000
+ wrappedTxn.Tx.Value = uint256.NewInt(0)
+ wrappedTxn.Tx.Data = []byte{4, 247}
+ wrappedTxn.Tx.ChainID = chainId
+ wrappedTxn.Tx.TipCap = uint256.NewInt(10000000000)
+ wrappedTxn.Tx.FeeCap = uint256.NewInt(10000000000)
+ wrappedTxn.Tx.MaxFeePerBlobGas = uint256.NewInt(123)
+
+ wrappedTxn.Blobs = make(Blobs, 2)
+ wrappedTxn.Commitments = make(BlobKzgs, 2)
+ wrappedTxn.Proofs = make(KZGProofs, 2)
+
+ copy(wrappedTxn.Blobs[0][:], hexutil.MustDecodeHex(testdata.ValidBlob1Hex))
+ copy(wrappedTxn.Blobs[1][:], hexutil.MustDecodeHex(testdata.ValidBlob2Hex))
+
+ commitment0, err := kzg.Ctx().BlobToKZGCommitment(wrappedTxn.Blobs[0][:], 0)
+ if err != nil {
+ panic(err)
+ }
+ commitment1, err := kzg.Ctx().BlobToKZGCommitment(wrappedTxn.Blobs[1][:], 0)
+ if err != nil {
+ panic(err)
+ }
+ copy(wrappedTxn.Commitments[0][:], commitment0[:])
+ copy(wrappedTxn.Commitments[1][:], commitment1[:])
+
+ proof0, err := kzg.Ctx().ComputeBlobKZGProof(wrappedTxn.Blobs[0][:], commitment0, 0)
+ if err != nil {
+ panic(err)
+ }
+ proof1, err := kzg.Ctx().ComputeBlobKZGProof(wrappedTxn.Blobs[1][:], commitment1, 0)
+ if err != nil {
+ panic(err)
+ }
+ copy(wrappedTxn.Proofs[0][:], proof0[:])
+ copy(wrappedTxn.Proofs[1][:], proof1[:])
+
+ wrappedTxn.Tx.BlobVersionedHashes = make([]common.Hash, 2)
+ wrappedTxn.Tx.BlobVersionedHashes[0] = common.Hash(kzg.KZGToVersionedHash(commitment0))
+ wrappedTxn.Tx.BlobVersionedHashes[1] = common.Hash(kzg.KZGToVersionedHash(commitment1))
+ return &wrappedTxn
+}
diff --git a/core/types/blob_tx_wrapper.go b/core/types/blob_tx_wrapper.go
index b18a194f309..f0e9b3392d4 100644
--- a/core/types/blob_tx_wrapper.go
+++ b/core/types/blob_tx_wrapper.go
@@ -17,6 +17,7 @@
package types
import (
+ "bytes"
"errors"
"fmt"
"io"
@@ -75,9 +76,9 @@ func (li BlobKzgs) payloadSize() int {
func (li BlobKzgs) encodePayload(w io.Writer, b []byte, payloadSize int) error {
// prefix
- if err := rlp.EncodeStructSizePrefix(payloadSize, w, b); err != nil {
- return err
- }
+ buf := newEncodingBuf()
+ l := rlp.EncodeListPrefix(payloadSize, buf[:])
+ w.Write(buf[:l])
for _, cmtmt := range li {
if err := rlp.EncodeString(cmtmt[:], w, b); err != nil {
@@ -125,9 +126,9 @@ func (li KZGProofs) payloadSize() int {
func (li KZGProofs) encodePayload(w io.Writer, b []byte, payloadSize int) error {
// prefix
- if err := rlp.EncodeStructSizePrefix(payloadSize, w, b); err != nil {
- return err
- }
+ buf := newEncodingBuf()
+ l := rlp.EncodeListPrefix(payloadSize, buf[:])
+ w.Write(buf[:l])
for _, proof := range li {
if err := rlp.EncodeString(proof[:], w, b); err != nil {
@@ -179,10 +180,10 @@ func (blobs Blobs) payloadSize() int {
func (blobs Blobs) encodePayload(w io.Writer, b []byte, payloadSize int) error {
// prefix
- if err := rlp.EncodeStructSizePrefix(payloadSize, w, b); err != nil {
- return err
- }
+ buf := newEncodingBuf()
+ l := rlp.EncodeListPrefix(payloadSize, buf[:])
+ w.Write(buf[:l])
for _, blob := range blobs {
if err := rlp.EncodeString(blob[:], w, b); err != nil {
return err
@@ -376,6 +377,49 @@ func (txw *BlobTxWrapper) DecodeRLP(s *rlp.Stream) error {
func (txw *BlobTxWrapper) EncodingSize() int {
return txw.Tx.EncodingSize()
}
+func (txw *BlobTxWrapper) payloadSize() (payloadSize int) {
+ l, _, _, _, _ := txw.Tx.payloadSize()
+ payloadSize += l + rlp.ListPrefixLen(l)
+ l = txw.Blobs.payloadSize()
+ payloadSize += l + rlp.ListPrefixLen(l)
+ l = txw.Commitments.payloadSize()
+ payloadSize += l + rlp.ListPrefixLen(l)
+ l = txw.Proofs.payloadSize()
+ payloadSize += l + rlp.ListPrefixLen(l)
+ return
+}
+func (txw *BlobTxWrapper) MarshalBinaryWrapped(w io.Writer) error {
+ b := newEncodingBuf()
+ defer pooledBuf.Put(b)
+ // encode TxType
+ b[0] = BlobTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ payloadSize := txw.payloadSize()
+ l := rlp.EncodeListPrefix(payloadSize, b[1:])
+ if _, err := w.Write(b[1 : 1+l]); err != nil {
+ return err
+ }
+ bw := bytes.Buffer{}
+ if err := txw.Tx.MarshalBinary(&bw); err != nil {
+ return err
+ }
+ if _, err := w.Write(bw.Bytes()[1:]); err != nil {
+ return err
+ }
+
+ if err := txw.Blobs.encodePayload(w, b[:], txw.Blobs.payloadSize()); err != nil {
+ return err
+ }
+ if err := txw.Commitments.encodePayload(w, b[:], txw.Commitments.payloadSize()); err != nil {
+ return err
+ }
+ if err := txw.Proofs.encodePayload(w, b[:], txw.Proofs.payloadSize()); err != nil {
+ return err
+ }
+ return nil
+}
func (txw *BlobTxWrapper) MarshalBinary(w io.Writer) error {
return txw.Tx.MarshalBinary(w)
}
diff --git a/core/types/block_test.go b/core/types/block_test.go
index e5929c0b650..af9dd09dd18 100644
--- a/core/types/block_test.go
+++ b/core/types/block_test.go
@@ -600,13 +600,6 @@ func TestCopyTxs(t *testing.T) {
}
}
- populateBlobWrapperTxs()
- for _, txn := range dummyBlobWrapperTxs {
- if txn.Tx.To != nil {
- txs = append(txs, txn)
- }
- }
-
copies := CopyTxs(txs)
assert.Equal(t, txs, copies)
}
diff --git a/core/types/typestest/test_data.go b/core/types/testdata/testdata.go
similarity index 99%
rename from core/types/typestest/test_data.go
rename to core/types/testdata/testdata.go
index d7ffc148e4c..9d044b18c77 100644
--- a/core/types/typestest/test_data.go
+++ b/core/types/testdata/testdata.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Erigon Authors
+// Copyright 2025 The Erigon Authors
// This file is part of Erigon.
//
// Erigon is free software: you can redistribute it and/or modify
@@ -14,61 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with Erigon. If not, see .
-package typestest
-
-import (
- "fmt"
-
- gokzg4844 "github.com/crate-crypto/go-kzg-4844"
-
- "github.com/erigontech/erigon-lib/common/hexutil"
- "github.com/erigontech/erigon-lib/crypto/kzg"
-)
-
-func MakeBlobTxnRlp() ([]byte, []gokzg4844.KZGCommitment) {
- bodyRlp := hexutil.MustDecodeHex(BodyRlpHex)
-
- blobsRlpPrefix := hexutil.MustDecodeHex("fa040008")
- blobRlpPrefix := hexutil.MustDecodeHex("ba020000")
-
- var blob0, blob1 = gokzg4844.Blob{}, gokzg4844.Blob{}
- copy(blob0[:], hexutil.MustDecodeHex(ValidBlob1Hex))
- copy(blob1[:], hexutil.MustDecodeHex(ValidBlob2Hex))
-
- var err error
- proofsRlpPrefix := hexutil.MustDecodeHex("f862")
- commitment0, _ := kzg.Ctx().BlobToKZGCommitment(blob0[:], 0)
- commitment1, _ := kzg.Ctx().BlobToKZGCommitment(blob1[:], 0)
-
- proof0, err := kzg.Ctx().ComputeBlobKZGProof(blob0[:], commitment0, 0)
- if err != nil {
- fmt.Println("error", err)
- }
- proof1, err := kzg.Ctx().ComputeBlobKZGProof(blob1[:], commitment1, 0)
- if err != nil {
- fmt.Println("error", err)
- }
-
- wrapperRlp := hexutil.MustDecodeHex("03fa0401fe")
- wrapperRlp = append(wrapperRlp, bodyRlp...)
- wrapperRlp = append(wrapperRlp, blobsRlpPrefix...)
- wrapperRlp = append(wrapperRlp, blobRlpPrefix...)
- wrapperRlp = append(wrapperRlp, blob0[:]...)
- wrapperRlp = append(wrapperRlp, blobRlpPrefix...)
- wrapperRlp = append(wrapperRlp, blob1[:]...)
- wrapperRlp = append(wrapperRlp, proofsRlpPrefix...)
- wrapperRlp = append(wrapperRlp, 0xb0)
- wrapperRlp = append(wrapperRlp, commitment0[:]...)
- wrapperRlp = append(wrapperRlp, 0xb0)
- wrapperRlp = append(wrapperRlp, commitment1[:]...)
- wrapperRlp = append(wrapperRlp, proofsRlpPrefix...)
- wrapperRlp = append(wrapperRlp, 0xb0)
- wrapperRlp = append(wrapperRlp, proof0[:]...)
- wrapperRlp = append(wrapperRlp, 0xb0)
- wrapperRlp = append(wrapperRlp, proof1[:]...)
-
- return wrapperRlp, []gokzg4844.KZGCommitment{commitment0, commitment1}
-}
+package testdata
const (
diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go
index 625dd5955ad..db7590ab227 100644
--- a/core/types/transaction_test.go
+++ b/core/types/transaction_test.go
@@ -40,7 +40,6 @@ import (
"github.com/erigontech/erigon-lib/common/u256"
"github.com/erigontech/erigon-lib/crypto"
"github.com/erigontech/erigon-lib/rlp"
- "github.com/erigontech/erigon/core/types/typestest"
)
// The values in those tests are from the Transaction Tests
@@ -702,12 +701,6 @@ func populateBlobTxs() {
}
}
-func populateBlobWrapperTxs() {
- for i := 0; i < N; i++ {
- dummyBlobWrapperTxs[i] = newRandBlobWrapper()
- }
-}
-
func TestBlobTxEncodeDecode(t *testing.T) {
rand.Seed(time.Now().UnixNano())
populateBlobTxs()
@@ -737,7 +730,7 @@ func TestBlobTxEncodeDecode(t *testing.T) {
}
func TestShortUnwrap(t *testing.T) {
- blobTxRlp, _ := typestest.MakeBlobTxnRlp()
+ blobTxRlp, _ := MakeBlobTxnRlp()
shortRlp, err := UnwrapTxPlayloadRlp(blobTxRlp)
if err != nil {
t.Errorf("short rlp stripping failed: %v", err)
@@ -749,7 +742,7 @@ func TestShortUnwrap(t *testing.T) {
t.Errorf("short rlp decoding failed : %v", err)
}
wrappedBlobTx := BlobTxWrapper{}
- blockTxRlp2, _ := typestest.MakeBlobTxnRlp()
+ blockTxRlp2, _ := MakeBlobTxnRlp()
err = wrappedBlobTx.DecodeRLP(rlp.NewStream(bytes.NewReader(blockTxRlp2[1:]), 0))
if err != nil {
t.Errorf("long rlp decoding failed: %v", err)
diff --git a/core/vm/contracts_test.go b/core/vm/contracts_test.go
index 0bcbd6245ac..581e30da432 100644
--- a/core/vm/contracts_test.go
+++ b/core/vm/contracts_test.go
@@ -53,24 +53,24 @@ type precompiledFailureTest struct {
// allPrecompiles does not map to the actual set of precompiles, as it also contains
// repriced versions of precompiles at certain slots
var allPrecompiles = map[libcommon.Address]PrecompiledContract{
- libcommon.BytesToAddress([]byte{1}): &ecrecover{},
- libcommon.BytesToAddress([]byte{2}): &sha256hash{},
- libcommon.BytesToAddress([]byte{3}): &ripemd160hash{},
- libcommon.BytesToAddress([]byte{4}): &dataCopy{},
- libcommon.BytesToAddress([]byte{5}): &bigModExp{eip2565: false},
+ libcommon.BytesToAddress([]byte{0x01}): &ecrecover{},
+ libcommon.BytesToAddress([]byte{0x02}): &sha256hash{},
+ libcommon.BytesToAddress([]byte{0x03}): &ripemd160hash{},
+ libcommon.BytesToAddress([]byte{0x04}): &dataCopy{},
+ libcommon.BytesToAddress([]byte{0x05}): &bigModExp{eip2565: false},
libcommon.BytesToAddress([]byte{0xf5}): &bigModExp{eip2565: true},
- libcommon.BytesToAddress([]byte{6}): &bn256AddIstanbul{},
- libcommon.BytesToAddress([]byte{7}): &bn256ScalarMulIstanbul{},
- libcommon.BytesToAddress([]byte{8}): &bn256PairingIstanbul{},
- libcommon.BytesToAddress([]byte{9}): &blake2F{},
- libcommon.BytesToAddress([]byte{10}): &bls12381G1Add{},
- libcommon.BytesToAddress([]byte{12}): &bls12381G1MultiExp{},
- libcommon.BytesToAddress([]byte{13}): &bls12381G2Add{},
- libcommon.BytesToAddress([]byte{15}): &bls12381G2MultiExp{},
- libcommon.BytesToAddress([]byte{16}): &bls12381Pairing{},
- libcommon.BytesToAddress([]byte{17}): &bls12381MapFpToG1{},
- libcommon.BytesToAddress([]byte{18}): &bls12381MapFp2ToG2{},
- libcommon.BytesToAddress([]byte{20}): &pointEvaluation{},
+ libcommon.BytesToAddress([]byte{0x06}): &bn256AddIstanbul{},
+ libcommon.BytesToAddress([]byte{0x07}): &bn256ScalarMulIstanbul{},
+ libcommon.BytesToAddress([]byte{0x08}): &bn256PairingIstanbul{},
+ libcommon.BytesToAddress([]byte{0x09}): &blake2F{},
+ libcommon.BytesToAddress([]byte{0x0a}): &pointEvaluation{},
+ libcommon.BytesToAddress([]byte{0x0b}): &bls12381G1Add{},
+ libcommon.BytesToAddress([]byte{0x0c}): &bls12381G1MultiExp{},
+ libcommon.BytesToAddress([]byte{0x0d}): &bls12381G2Add{},
+ libcommon.BytesToAddress([]byte{0x0e}): &bls12381G2MultiExp{},
+ libcommon.BytesToAddress([]byte{0x0f}): &bls12381Pairing{},
+ libcommon.BytesToAddress([]byte{0x10}): &bls12381MapFpToG1{},
+ libcommon.BytesToAddress([]byte{0x11}): &bls12381MapFp2ToG2{},
libcommon.BytesToAddress([]byte{0x01, 0x00}): &p256Verify{},
}
@@ -265,7 +265,7 @@ func TestPrecompiledModExpOOG(t *testing.T) {
}
func TestModExpPrecompilePotentialOutOfRange(t *testing.T) {
- modExpContract := PrecompiledContractsCancun[libcommon.BytesToAddress([]byte{0x05})]
+ modExpContract := allPrecompiles[libcommon.BytesToAddress([]byte{0xf5})]
hexString := "0x0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000ffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000ee"
input := hexutil.MustDecode(hexString)
maxGas := uint64(math.MaxUint64)
@@ -323,32 +323,31 @@ func benchJson(name, addr string, b *testing.B) {
}
}
-func TestPrecompiledBLS12381G1Add(t *testing.T) { testJson("blsG1Add", "0a", t) }
+func TestPrecompiledBLS12381G1Add(t *testing.T) { testJson("blsG1Add", "0b", t) }
func TestPrecompiledBLS12381G1MultiExp(t *testing.T) { testJson("blsG1MultiExp", "0c", t) }
func TestPrecompiledBLS12381G2Add(t *testing.T) { testJson("blsG2Add", "0d", t) }
-func TestPrecompiledBLS12381G2MultiExp(t *testing.T) { testJson("blsG2MultiExp", "0f", t) }
-func TestPrecompiledBLS12381Pairing(t *testing.T) { testJson("blsPairing", "10", t) }
-func TestPrecompiledBLS12381MapG1(t *testing.T) { testJson("blsMapG1", "11", t) }
-func TestPrecompiledBLS12381MapG2(t *testing.T) { testJson("blsMapG2", "12", t) }
-func TestPrecompiledPointEvaluation(t *testing.T) { testJson("pointEvaluation", "14", t) }
-
-func BenchmarkPrecompiledBLS12381G1Add(b *testing.B) { benchJson("blsG1Add", "0a", b) }
-func BenchmarkPrecompiledBLS12381G1Mul(b *testing.B) { benchJson("blsG1Mul", "0b", b) }
+func TestPrecompiledBLS12381G2MultiExp(t *testing.T) { testJson("blsG2MultiExp", "0e", t) }
+func TestPrecompiledBLS12381Pairing(t *testing.T) { testJson("blsPairing", "0f", t) }
+func TestPrecompiledBLS12381MapG1(t *testing.T) { testJson("blsMapG1", "10", t) }
+func TestPrecompiledBLS12381MapG2(t *testing.T) { testJson("blsMapG2", "11", t) }
+func TestPrecompiledPointEvaluation(t *testing.T) { testJson("pointEvaluation", "0a", t) }
+
+func BenchmarkPrecompiledBLS12381G1Add(b *testing.B) { benchJson("blsG1Add", "0b", b) }
func BenchmarkPrecompiledBLS12381G1MultiExp(b *testing.B) { benchJson("blsG1MultiExp", "0c", b) }
func BenchmarkPrecompiledBLS12381G2Add(b *testing.B) { benchJson("blsG2Add", "0d", b) }
-func BenchmarkPrecompiledBLS12381G2MultiExp(b *testing.B) { benchJson("blsG2MultiExp", "0f", b) }
-func BenchmarkPrecompiledBLS12381Pairing(b *testing.B) { benchJson("blsPairing", "10", b) }
-func BenchmarkPrecompiledBLS12381MapG1(b *testing.B) { benchJson("blsMapG1", "11", b) }
-func BenchmarkPrecompiledBLS12381MapG2(b *testing.B) { benchJson("blsMapG2", "12", b) }
+func BenchmarkPrecompiledBLS12381G2MultiExp(b *testing.B) { benchJson("blsG2MultiExp", "0e", b) }
+func BenchmarkPrecompiledBLS12381Pairing(b *testing.B) { benchJson("blsPairing", "0f", b) }
+func BenchmarkPrecompiledBLS12381MapG1(b *testing.B) { benchJson("blsMapG1", "10", b) }
+func BenchmarkPrecompiledBLS12381MapG2(b *testing.B) { benchJson("blsMapG2", "11", b) }
// Failure tests
-func TestPrecompiledBLS12381G1AddFail(t *testing.T) { testJsonFail("blsG1Add", "0a", t) }
+func TestPrecompiledBLS12381G1AddFail(t *testing.T) { testJsonFail("blsG1Add", "0b", t) }
func TestPrecompiledBLS12381G1MultiExpFail(t *testing.T) { testJsonFail("blsG1MultiExp", "0c", t) }
func TestPrecompiledBLS12381G2AddFail(t *testing.T) { testJsonFail("blsG2Add", "0d", t) }
-func TestPrecompiledBLS12381G2MultiExpFail(t *testing.T) { testJsonFail("blsG2MultiExp", "0f", t) }
-func TestPrecompiledBLS12381PairingFail(t *testing.T) { testJsonFail("blsPairing", "10", t) }
-func TestPrecompiledBLS12381MapG1Fail(t *testing.T) { testJsonFail("blsMapG1", "11", t) }
-func TestPrecompiledBLS12381MapG2Fail(t *testing.T) { testJsonFail("blsMapG2", "12", t) }
+func TestPrecompiledBLS12381G2MultiExpFail(t *testing.T) { testJsonFail("blsG2MultiExp", "0e", t) }
+func TestPrecompiledBLS12381PairingFail(t *testing.T) { testJsonFail("blsPairing", "0f", t) }
+func TestPrecompiledBLS12381MapG1Fail(t *testing.T) { testJsonFail("blsMapG1", "10", t) }
+func TestPrecompiledBLS12381MapG2Fail(t *testing.T) { testJsonFail("blsMapG2", "11", t) }
func loadJson(name string) ([]precompiledTest, error) {
data, err := os.ReadFile(fmt.Sprintf("testdata/precompiles/%v.json", name))
diff --git a/debug.Dockerfile b/debug.Dockerfile
index 0a76f6cd8d5..6f457c10086 100644
--- a/debug.Dockerfile
+++ b/debug.Dockerfile
@@ -1,5 +1,5 @@
# syntax = docker/dockerfile:1.2
-FROM docker.io/library/golang:1.21-alpine3.17 AS builder
+FROM docker.io/library/golang:1.24-alpine3.20 AS builder
RUN apk --no-cache add build-base linux-headers git bash ca-certificates libstdc++
@@ -16,7 +16,7 @@ RUN --mount=type=cache,target=/root/.cache \
make all
-FROM docker.io/library/golang:1.21-alpine3.17 AS tools-builder
+FROM docker.io/library/golang:1.24-alpine3.20 AS tools-builder
RUN apk --no-cache add build-base linux-headers git bash ca-certificates libstdc++
WORKDIR /app
@@ -27,7 +27,7 @@ ADD go.sum go.sum
RUN mkdir -p /app/build/bin
-FROM docker.io/library/alpine:3.17
+FROM docker.io/library/alpine:3.20
# install required runtime libs, along with some helpers for debugging
RUN apk add --no-cache ca-certificates libstdc++ tzdata
diff --git a/erigon-lib/.golangci.yml b/erigon-lib/.golangci.yml
index 199091ee57a..7bc41c80a54 100644
--- a/erigon-lib/.golangci.yml
+++ b/erigon-lib/.golangci.yml
@@ -36,6 +36,9 @@ linters:
# - stylecheck
linters-settings:
+ gosec:
+ excludes:
+ - G115 #TODO: enable me
gocritic: # see https://golangci-lint.run/usage/linters/#gocritic and https://go-critic.github.io/overview#checks-overview
enabled-tags:
- performance
diff --git a/erigon-lib/common/dbg/experiments.go b/erigon-lib/common/dbg/experiments.go
index 167a4db34dd..a170a03cce5 100644
--- a/erigon-lib/common/dbg/experiments.go
+++ b/erigon-lib/common/dbg/experiments.go
@@ -31,6 +31,8 @@ import (
)
var (
+ MaxReorgDepth = EnvInt("MAX_REORG_DEPTH", 512)
+
doMemstat = EnvBool("NO_MEMSTAT", true)
saveHeapProfile = EnvBool("SAVE_HEAP_PROFILE", false)
heapProfileFilePath = EnvString("HEAP_PROFILE_FILE_PATH", "")
diff --git a/erigon-lib/common/hash.go b/erigon-lib/common/hash.go
index ea6aaf35e29..6488172f490 100644
--- a/erigon-lib/common/hash.go
+++ b/erigon-lib/common/hash.go
@@ -22,7 +22,7 @@ import (
"encoding/hex"
"fmt"
"math/big"
- "math/rand"
+ "math/rand/v2"
"reflect"
"github.com/erigontech/erigon-lib/common/hexutil"
@@ -145,7 +145,7 @@ func (h *Hash) SetBytes(b []byte) {
// Generate implements testing/quick.Generator.
func (h Hash) Generate(rand *rand.Rand, size int) reflect.Value {
- m := rand.Intn(len(h))
+ m := rand.IntN(len(h))
for i := len(h) - 1; i > m; i-- {
h[i] = byte(rand.Uint32())
}
diff --git a/erigon-lib/common/hexutil/errors.go b/erigon-lib/common/hexutil/errors.go
index 6a736975b86..07d0a7963e7 100644
--- a/erigon-lib/common/hexutil/errors.go
+++ b/erigon-lib/common/hexutil/errors.go
@@ -20,15 +20,17 @@ import "fmt"
// These errors are from go-ethereum in order to keep compatibility with geth error codes.
var (
- ErrEmptyString = &decError{"empty hex string"}
- ErrSyntax = &decError{"invalid hex string"}
- ErrMissingPrefix = &decError{"hex string without 0x prefix"}
- ErrOddLength = &decError{"hex string of odd length"}
- ErrEmptyNumber = &decError{"hex string \"0x\""}
- ErrLeadingZero = &decError{"hex number with leading zero digits"}
- ErrUint64Range = &decError{"hex number > 64 bits"}
- ErrUintRange = &decError{fmt.Sprintf("hex number > %d bits", uintBits)}
- ErrBig256Range = &decError{"hex number > 256 bits"}
+ ErrEmptyString = &decError{"empty hex string"}
+ ErrSyntax = &decError{"invalid hex string"}
+ ErrMissingPrefix = &decError{"hex string without 0x prefix"}
+ ErrOddLength = &decError{"hex string of odd length"}
+ ErrEmptyNumber = &decError{"hex string \"0x\""}
+ ErrLeadingZero = &decError{"hex number with leading zero digits"}
+ ErrUint64Range = &decError{"hex number > 64 bits"}
+ ErrUintRange = &decError{fmt.Sprintf("hex number > %d bits", uintBits)}
+ ErrBig256Range = &decError{"hex number > 256 bits"}
+ ErrTooBigHexString = &decError{"hex string too long, want at most 32 bytes"}
+ ErrHexStringInvalid = &decError{"hex string invalid"}
)
type decError struct{ msg string }
diff --git a/erigon-lib/common/hexutil/hexutil.go b/erigon-lib/common/hexutil/hexutil.go
index 5976c166e2b..25eed5a77c7 100644
--- a/erigon-lib/common/hexutil/hexutil.go
+++ b/erigon-lib/common/hexutil/hexutil.go
@@ -141,6 +141,24 @@ func has0xPrefix(input string) bool {
return len(input) >= 2 && input[0] == '0' && (input[1] == 'x' || input[1] == 'X')
}
+// IsValidQuantity checks if input is a valid hex-encoded quantity as per JSON-RPC spec.
+// It returns nil if the input is valid, otherwise an error.
+func IsValidQuantity(input string) error {
+ input, err := checkNumber(input)
+ if err != nil {
+ return err
+ }
+ if len(input) > 64 {
+ return ErrTooBigHexString
+ }
+ for _, b := range input {
+ if decodeNibble(byte(b)) == badNibble {
+ return ErrHexStringInvalid
+ }
+ }
+ return nil
+}
+
func checkNumber(input string) (raw string, err error) {
if len(input) == 0 {
return "", ErrEmptyString
diff --git a/erigon-lib/common/hexutil/hexutil_test.go b/erigon-lib/common/hexutil/hexutil_test.go
index 2ca2e16abda..f3bc45e91f0 100644
--- a/erigon-lib/common/hexutil/hexutil_test.go
+++ b/erigon-lib/common/hexutil/hexutil_test.go
@@ -118,6 +118,30 @@ var (
},
}
+ isValidQtyTests = []unmarshalTest{
+ // invalid
+ {input: ``, wantErr: ErrEmptyString},
+ {input: `0`, wantErr: ErrMissingPrefix},
+ {input: `0x`, wantErr: ErrEmptyNumber},
+ {input: `0x01`, wantErr: ErrLeadingZero},
+ {input: `0x00`, wantErr: ErrLeadingZero},
+ {input: `0x0000000000000000000000000000000000000000000000000000000000000001`, wantErr: ErrLeadingZero},
+ {input: `0x0000000000000000000000000000000000000000000000000000000000000000`, wantErr: ErrLeadingZero},
+ {input: `0x10000000000000000000000000000000000000000000000000000000000000000`, wantErr: ErrTooBigHexString},
+ {input: `0x1zz01`, wantErr: ErrHexStringInvalid},
+ {input: `0xasdf`, wantErr: ErrHexStringInvalid},
+
+ // valid
+ {input: `0x0`, wantErr: nil},
+ {input: `0x1`, wantErr: nil},
+ {input: `0x2F2`, wantErr: nil},
+ {input: `0X2F2`, wantErr: nil},
+ {input: `0x1122aaff`, wantErr: nil},
+ {input: `0xbbb`, wantErr: nil},
+ {input: `0xffffffffffffffff`, wantErr: nil},
+ {input: `0x123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0`, wantErr: nil},
+ }
+
decodeUint64Tests = []unmarshalTest{
// invalid
{input: `0`, wantErr: ErrMissingPrefix},
@@ -199,3 +223,12 @@ func TestEncode(t *testing.T) {
}
}
}
+
+func TestIsValidQuantity(t *testing.T) {
+ for idx, test := range isValidQtyTests {
+ t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) {
+ err := IsValidQuantity(test.input)
+ checkError(t, test.input, err, test.wantErr)
+ })
+ }
+}
diff --git a/erigon-lib/config3/config3.go b/erigon-lib/config3/config3.go
index 8c2c07bdb92..e574af70142 100644
--- a/erigon-lib/config3/config3.go
+++ b/erigon-lib/config3/config3.go
@@ -26,6 +26,4 @@ const StepsInFrozenFile = 64
const EnableHistoryV4InTest = true
-const MaxReorgDepthV3 = 512
-
const DefaultPruneDistance = 100_000
diff --git a/erigon-lib/direct/txpool_client.go b/erigon-lib/direct/txpool_client.go
index 6d49ba54a0b..55f22aecafa 100644
--- a/erigon-lib/direct/txpool_client.go
+++ b/erigon-lib/direct/txpool_client.go
@@ -119,3 +119,7 @@ func (s *TxPoolClient) Status(ctx context.Context, in *txpool_proto.StatusReques
func (s *TxPoolClient) Nonce(ctx context.Context, in *txpool_proto.NonceRequest, opts ...grpc.CallOption) (*txpool_proto.NonceReply, error) {
return s.server.Nonce(ctx, in)
}
+
+func (s *TxPoolClient) GetBlobs(ctx context.Context, in *txpool_proto.GetBlobsRequest, opts ...grpc.CallOption) (*txpool_proto.GetBlobsReply, error) {
+ return s.server.GetBlobs(ctx, in)
+}
diff --git a/erigon-lib/downloader/downloader.go b/erigon-lib/downloader/downloader.go
index a08629ecffa..082ce80b9bf 100644
--- a/erigon-lib/downloader/downloader.go
+++ b/erigon-lib/downloader/downloader.go
@@ -24,7 +24,7 @@ import (
"errors"
"fmt"
"math"
- "math/rand"
+ "math/rand/v2"
"net/http"
"net/url"
"os"
@@ -1748,7 +1748,7 @@ func selectDownloadPeer(ctx context.Context, peerUrls []*url.URL, t *torrent.Tor
}
default:
- peerIndex := rand.Intn(len(peerUrls))
+ peerIndex := rand.IntN(len(peerUrls))
peerUrl := peerUrls[peerIndex]
downloadUrl := peerUrl.JoinPath(t.Name())
peerInfo, err := getWebpeerTorrentInfo(ctx, downloadUrl)
diff --git a/erigon-lib/downloader/snaptype/type.go b/erigon-lib/downloader/snaptype/type.go
index 37cea56a128..6df97401423 100644
--- a/erigon-lib/downloader/snaptype/type.go
+++ b/erigon-lib/downloader/snaptype/type.go
@@ -18,10 +18,10 @@ package snaptype
import (
"context"
+ "crypto/rand"
"encoding/binary"
"errors"
"fmt"
- "math/rand"
"os"
"path/filepath"
"strconv"
@@ -103,7 +103,7 @@ func ReadAndCreateSaltIfNeeded(baseDir string) (uint32, error) {
dir.MustExist(baseDir)
saltBytes := make([]byte, 4)
- binary.BigEndian.PutUint32(saltBytes, rand.Uint32())
+ binary.BigEndian.PutUint32(saltBytes, randUint32())
if err := dir.WriteFileWithFsync(fpath, saltBytes, os.ModePerm); err != nil {
return 0, err
}
@@ -116,7 +116,7 @@ func ReadAndCreateSaltIfNeeded(baseDir string) (uint32, error) {
dir.MustExist(baseDir)
saltBytes := make([]byte, 4)
- binary.BigEndian.PutUint32(saltBytes, rand.Uint32())
+ binary.BigEndian.PutUint32(saltBytes, randUint32())
if err := dir.WriteFileWithFsync(fpath, saltBytes, os.ModePerm); err != nil {
return 0, err
}
@@ -584,3 +584,12 @@ func ExtractRange(ctx context.Context, f FileInfo, extractor RangeExtractor, ind
return lastKeyValue, nil
}
+
+func randUint32() uint32 {
+ var buf [4]byte
+ _, err := rand.Read(buf[:])
+ if err != nil {
+ panic(err)
+ }
+ return binary.LittleEndian.Uint32(buf[:])
+}
diff --git a/erigon-lib/downloader/util.go b/erigon-lib/downloader/util.go
index 2de24028e6f..f73b3c657e5 100644
--- a/erigon-lib/downloader/util.go
+++ b/erigon-lib/downloader/util.go
@@ -19,11 +19,9 @@ package downloader
import (
"bytes"
"context"
- "crypto/sha1"
+ //nolint:gosec
"errors"
"fmt"
- "io"
- "os"
"path"
"path/filepath"
"runtime"
@@ -518,40 +516,3 @@ func ScheduleVerifyFile(ctx context.Context, t *torrent.Torrent, completePieces
}
}
}
-
-func VerifyFileFailFast(ctx context.Context, t *torrent.Torrent, root string, completePieces *atomic.Uint64) error {
- info := t.Info()
- file := info.UpvertedFiles()[0]
- fPath := filepath.Join(append([]string{root, info.Name}, file.Path...)...)
- f, err := os.Open(fPath)
- if err != nil {
- return err
- }
- defer func() {
- if err != nil {
- f.Close()
- }
- }()
-
- hasher := sha1.New()
- for i := 0; i < info.NumPieces(); i++ {
- p := info.Piece(i)
- hasher.Reset()
- _, err := io.Copy(hasher, io.NewSectionReader(f, p.Offset(), p.Length()))
- if err != nil {
- return err
- }
- good := bytes.Equal(hasher.Sum(nil), p.Hash().Bytes())
- if !good {
- return fmt.Errorf("hash mismatch at piece %d, file: %s", i, t.Name())
- }
-
- completePieces.Add(1)
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
- }
- return nil
-}
diff --git a/erigon-lib/downloader/util_bittorrent_v1.go b/erigon-lib/downloader/util_bittorrent_v1.go
new file mode 100644
index 00000000000..502bbb00386
--- /dev/null
+++ b/erigon-lib/downloader/util_bittorrent_v1.go
@@ -0,0 +1,68 @@
+// Copyright 2021 The Erigon Authors
+// This file is part of Erigon.
+//
+// Erigon is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Erigon is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with Erigon. If not, see .
+
+package downloader
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha1" //nolint:gosec
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sync/atomic"
+
+ "github.com/anacrolix/torrent"
+)
+
+func VerifyFileFailFast(ctx context.Context, t *torrent.Torrent, root string, completePieces *atomic.Uint64) error {
+ info := t.Info()
+ file := info.UpvertedFiles()[0]
+ fPath := filepath.Join(append([]string{root, info.Name}, file.Path...)...)
+ f, err := os.Open(fPath)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ f.Close()
+ }
+ }()
+
+ // Bittorrent v1 using `sha1`
+ hasher := sha1.New() //nolint:gosec
+ for i := 0; i < info.NumPieces(); i++ {
+ p := info.Piece(i)
+ hasher.Reset()
+ _, err := io.Copy(hasher, io.NewSectionReader(f, p.Offset(), p.Length()))
+ if err != nil {
+ return err
+ }
+ good := bytes.Equal(hasher.Sum(nil), p.Hash().Bytes())
+ if !good {
+ return fmt.Errorf("hash mismatch at piece %d, file: %s", i, t.Name())
+ }
+
+ completePieces.Add(1)
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ }
+ return nil
+}
diff --git a/erigon-lib/go.mod b/erigon-lib/go.mod
index 8980f32486a..6c768e4a26d 100644
--- a/erigon-lib/go.mod
+++ b/erigon-lib/go.mod
@@ -1,6 +1,8 @@
module github.com/erigontech/erigon-lib
-go 1.22.12
+go 1.23.0
+
+toolchain go1.23.6
replace (
github.com/anacrolix/torrent => github.com/erigontech/torrent v1.54.3-alpha-1
@@ -10,7 +12,7 @@ replace (
require (
github.com/erigontech/erigon-snapshot v1.3.1-0.20250121111444-6cc4c0c1fb89
- github.com/erigontech/interfaces v0.0.0-20250218124515-7c4293b6afb3
+ github.com/erigontech/interfaces v0.0.0-20250304065538-03fcc1da2237
github.com/erigontech/mdbx-go v0.38.10
github.com/erigontech/secp256k1 v1.1.0
github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417
@@ -52,14 +54,14 @@ require (
github.com/tidwall/btree v1.6.0
github.com/ugorji/go/codec v1.2.12
go.uber.org/mock v0.5.0
- golang.org/x/crypto v0.33.0
- golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c
- golang.org/x/sync v0.11.0
- golang.org/x/sys v0.30.0
- golang.org/x/time v0.10.0
+ golang.org/x/crypto v0.36.0
+ golang.org/x/exp v0.0.0-20250305212735-054e65f0b394
+ golang.org/x/sync v0.12.0
+ golang.org/x/sys v0.31.0
+ golang.org/x/time v0.11.0
google.golang.org/grpc v1.69.4
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1
- google.golang.org/protobuf v1.36.4
+ google.golang.org/protobuf v1.36.5
)
require (
@@ -73,8 +75,8 @@ require (
github.com/opencontainers/runtime-spec v1.2.0 // indirect
github.com/pion/udp v0.1.4 // indirect
go.opentelemetry.io/otel/metric v1.31.0 // indirect
- golang.org/x/mod v0.23.0 // indirect
- golang.org/x/tools v0.30.0 // indirect
+ golang.org/x/mod v0.24.0 // indirect
+ golang.org/x/tools v0.31.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect
modernc.org/libc v1.55.3 // indirect
modernc.org/memory v1.8.0 // indirect
@@ -156,8 +158,8 @@ require (
go.opentelemetry.io/otel v1.31.0 // indirect
go.opentelemetry.io/otel/trace v1.31.0 // indirect
go.uber.org/goleak v1.3.0 // indirect
- golang.org/x/net v0.35.0
- golang.org/x/text v0.22.0 // indirect
+ golang.org/x/net v0.37.0
+ golang.org/x/text v0.23.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
modernc.org/mathutil v1.6.0 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
diff --git a/erigon-lib/go.sum b/erigon-lib/go.sum
index eeca8319ace..aa407229b33 100644
--- a/erigon-lib/go.sum
+++ b/erigon-lib/go.sum
@@ -156,8 +156,8 @@ github.com/erigontech/erigon-snapshot v1.3.1-0.20250121111444-6cc4c0c1fb89 h1:7N
github.com/erigontech/erigon-snapshot v1.3.1-0.20250121111444-6cc4c0c1fb89/go.mod h1:ooHlCl+eEYzebiPu+FP6Q6SpPUeMADn8Jxabv3IKb9M=
github.com/erigontech/go-kzg-4844 v0.0.0-20250130131058-ce13be60bc86 h1:UKcIbFZUGIKzK4aQbkv/dYiOVxZSUuD3zKadhmfwdwU=
github.com/erigontech/go-kzg-4844 v0.0.0-20250130131058-ce13be60bc86/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
-github.com/erigontech/interfaces v0.0.0-20250218124515-7c4293b6afb3 h1:Qwd3asRe5aeKzV/oHgADLu92db2417AM5ApjpT8MD1o=
-github.com/erigontech/interfaces v0.0.0-20250218124515-7c4293b6afb3/go.mod h1:N7OUkhkcagp9+7yb4ycHsG2VWCOmuJ1ONBecJshxtLE=
+github.com/erigontech/interfaces v0.0.0-20250304065538-03fcc1da2237 h1:W5Zr2xifo56qE0IF4OR4bOcL+r5jy4HwtJizLwMD9kg=
+github.com/erigontech/interfaces v0.0.0-20250304065538-03fcc1da2237/go.mod h1:N7OUkhkcagp9+7yb4ycHsG2VWCOmuJ1ONBecJshxtLE=
github.com/erigontech/mdbx-go v0.38.10 h1:Nxqxu9QNOU9Bvgs9pq6SKkNYysbjt6b9n929daVNfcU=
github.com/erigontech/mdbx-go v0.38.10/go.mod h1:lkqHAZqXtFaIPlvTaGAx3VUDuGYZcuhve1l4JVVN1Z0=
github.com/erigontech/secp256k1 v1.1.0 h1:mO3YJMUSoASE15Ya//SoHiisptUhdXExuMUN1M0X9qY=
@@ -519,11 +519,11 @@ golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0
golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220516162934-403b01795ae8/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
-golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus=
-golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
+golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
+golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c h1:KL/ZBHXgKGVmuZBZ01Lt57yE5ws8ZPSkkihmEyq7FXc=
-golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU=
+golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw=
+golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -532,8 +532,8 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
-golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
+golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -562,8 +562,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
-golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
+golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
+golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -574,8 +574,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
-golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
+golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -613,8 +613,8 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
-golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
+golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -628,10 +628,10 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
-golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
-golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4=
-golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
+golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
+golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
+golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -645,8 +645,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
-golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
+golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU=
+golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -680,8 +680,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.36.4 h1:6A3ZDJHn/eNqc1i+IdefRzy/9PokBTPvcqMySR7NNIM=
-google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
+google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/erigon-lib/gointerfaces/downloaderproto/downloader.pb.go b/erigon-lib/gointerfaces/downloaderproto/downloader.pb.go
index 29e6c17f1d5..15aa720b3c1 100644
--- a/erigon-lib/gointerfaces/downloaderproto/downloader.pb.go
+++ b/erigon-lib/gointerfaces/downloaderproto/downloader.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.36.4
+// protoc-gen-go v1.36.3
// protoc v5.29.3
// source: downloader/downloader.proto
@@ -13,7 +13,6 @@ import (
emptypb "google.golang.org/protobuf/types/known/emptypb"
reflect "reflect"
sync "sync"
- unsafe "unsafe"
)
const (
@@ -464,7 +463,7 @@ func (x *TorrentCompletedReply) GetHash() *typesproto.H160 {
var File_downloader_downloader_proto protoreflect.FileDescriptor
-var file_downloader_downloader_proto_rawDesc = string([]byte{
+var file_downloader_downloader_proto_rawDesc = []byte{
0x0a, 0x1b, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x2f, 0x64, 0x6f, 0x77,
0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x64,
0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
@@ -536,16 +535,16 @@ var file_downloader_downloader_proto_rawDesc = string([]byte{
0x2f, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x3b, 0x64, 0x6f, 0x77, 0x6e,
0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x33,
-})
+}
var (
file_downloader_downloader_proto_rawDescOnce sync.Once
- file_downloader_downloader_proto_rawDescData []byte
+ file_downloader_downloader_proto_rawDescData = file_downloader_downloader_proto_rawDesc
)
func file_downloader_downloader_proto_rawDescGZIP() []byte {
file_downloader_downloader_proto_rawDescOnce.Do(func() {
- file_downloader_downloader_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_downloader_downloader_proto_rawDesc), len(file_downloader_downloader_proto_rawDesc)))
+ file_downloader_downloader_proto_rawDescData = protoimpl.X.CompressGZIP(file_downloader_downloader_proto_rawDescData)
})
return file_downloader_downloader_proto_rawDescData
}
@@ -599,7 +598,7 @@ func file_downloader_downloader_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: unsafe.Slice(unsafe.StringData(file_downloader_downloader_proto_rawDesc), len(file_downloader_downloader_proto_rawDesc)),
+ RawDescriptor: file_downloader_downloader_proto_rawDesc,
NumEnums: 0,
NumMessages: 10,
NumExtensions: 0,
@@ -610,6 +609,7 @@ func file_downloader_downloader_proto_init() {
MessageInfos: file_downloader_downloader_proto_msgTypes,
}.Build()
File_downloader_downloader_proto = out.File
+ file_downloader_downloader_proto_rawDesc = nil
file_downloader_downloader_proto_goTypes = nil
file_downloader_downloader_proto_depIdxs = nil
}
diff --git a/erigon-lib/gointerfaces/executionproto/execution.pb.go b/erigon-lib/gointerfaces/executionproto/execution.pb.go
index bf640991c75..0001a7edf8e 100644
--- a/erigon-lib/gointerfaces/executionproto/execution.pb.go
+++ b/erigon-lib/gointerfaces/executionproto/execution.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.36.4
+// protoc-gen-go v1.36.3
// protoc v5.29.3
// source: execution/execution.proto
@@ -13,7 +13,6 @@ import (
emptypb "google.golang.org/protobuf/types/known/emptypb"
reflect "reflect"
sync "sync"
- unsafe "unsafe"
)
const (
@@ -1625,7 +1624,7 @@ func (x *HasBlockResponse) GetHasBlock() bool {
var File_execution_execution_proto protoreflect.FileDescriptor
-var file_execution_execution_proto_rawDesc = string([]byte{
+var file_execution_execution_proto_rawDesc = []byte{
0x0a, 0x19, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x65, 0x78, 0x65, 0x63,
0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x65, 0x78, 0x65,
0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
@@ -1977,16 +1976,16 @@ var file_execution_execution_proto_rawDesc = string([]byte{
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x1c, 0x5a, 0x1a, 0x2e, 0x2f, 0x65, 0x78, 0x65,
0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-})
+}
var (
file_execution_execution_proto_rawDescOnce sync.Once
- file_execution_execution_proto_rawDescData []byte
+ file_execution_execution_proto_rawDescData = file_execution_execution_proto_rawDesc
)
func file_execution_execution_proto_rawDescGZIP() []byte {
file_execution_execution_proto_rawDescOnce.Do(func() {
- file_execution_execution_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_execution_execution_proto_rawDesc), len(file_execution_execution_proto_rawDesc)))
+ file_execution_execution_proto_rawDescData = protoimpl.X.CompressGZIP(file_execution_execution_proto_rawDescData)
})
return file_execution_execution_proto_rawDescData
}
@@ -2135,7 +2134,7 @@ func file_execution_execution_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: unsafe.Slice(unsafe.StringData(file_execution_execution_proto_rawDesc), len(file_execution_execution_proto_rawDesc)),
+ RawDescriptor: file_execution_execution_proto_rawDesc,
NumEnums: 1,
NumMessages: 26,
NumExtensions: 0,
@@ -2147,6 +2146,7 @@ func file_execution_execution_proto_init() {
MessageInfos: file_execution_execution_proto_msgTypes,
}.Build()
File_execution_execution_proto = out.File
+ file_execution_execution_proto_rawDesc = nil
file_execution_execution_proto_goTypes = nil
file_execution_execution_proto_depIdxs = nil
}
diff --git a/erigon-lib/gointerfaces/remoteproto/bor.pb.go b/erigon-lib/gointerfaces/remoteproto/bor.pb.go
index 36de2105028..90b7abdab33 100644
--- a/erigon-lib/gointerfaces/remoteproto/bor.pb.go
+++ b/erigon-lib/gointerfaces/remoteproto/bor.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.36.4
+// protoc-gen-go v1.36.3
// protoc v5.29.3
// source: remote/bor.proto
@@ -13,7 +13,6 @@ import (
emptypb "google.golang.org/protobuf/types/known/emptypb"
reflect "reflect"
sync "sync"
- unsafe "unsafe"
)
const (
@@ -389,7 +388,7 @@ func (x *Validator) GetProposerPriority() int64 {
var File_remote_bor_proto protoreflect.FileDescriptor
-var file_remote_bor_proto_rawDesc = string([]byte{
+var file_remote_bor_proto_rawDesc = []byte{
0x0a, 0x10, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2f, 0x62, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x12, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74,
@@ -462,16 +461,16 @@ var file_remote_bor_proto_rawDesc = string([]byte{
0x75, 0x63, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x16, 0x5a,
0x14, 0x2e, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x3b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-})
+}
var (
file_remote_bor_proto_rawDescOnce sync.Once
- file_remote_bor_proto_rawDescData []byte
+ file_remote_bor_proto_rawDescData = file_remote_bor_proto_rawDesc
)
func file_remote_bor_proto_rawDescGZIP() []byte {
file_remote_bor_proto_rawDescOnce.Do(func() {
- file_remote_bor_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_remote_bor_proto_rawDesc), len(file_remote_bor_proto_rawDesc)))
+ file_remote_bor_proto_rawDescData = protoimpl.X.CompressGZIP(file_remote_bor_proto_rawDescData)
})
return file_remote_bor_proto_rawDescData
}
@@ -522,7 +521,7 @@ func file_remote_bor_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: unsafe.Slice(unsafe.StringData(file_remote_bor_proto_rawDesc), len(file_remote_bor_proto_rawDesc)),
+ RawDescriptor: file_remote_bor_proto_rawDesc,
NumEnums: 0,
NumMessages: 7,
NumExtensions: 0,
@@ -533,6 +532,7 @@ func file_remote_bor_proto_init() {
MessageInfos: file_remote_bor_proto_msgTypes,
}.Build()
File_remote_bor_proto = out.File
+ file_remote_bor_proto_rawDesc = nil
file_remote_bor_proto_goTypes = nil
file_remote_bor_proto_depIdxs = nil
}
diff --git a/erigon-lib/gointerfaces/remoteproto/ethbackend.pb.go b/erigon-lib/gointerfaces/remoteproto/ethbackend.pb.go
index 15cf024dee0..f532eed81bc 100644
--- a/erigon-lib/gointerfaces/remoteproto/ethbackend.pb.go
+++ b/erigon-lib/gointerfaces/remoteproto/ethbackend.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.36.4
+// protoc-gen-go v1.36.3
// protoc v5.29.3
// source: remote/ethbackend.proto
@@ -13,7 +13,6 @@ import (
emptypb "google.golang.org/protobuf/types/known/emptypb"
reflect "reflect"
sync "sync"
- unsafe "unsafe"
)
const (
@@ -1704,7 +1703,7 @@ func (x *SyncingReply_StageProgress) GetBlockNumber() uint64 {
var File_remote_ethbackend_proto protoreflect.FileDescriptor
-var file_remote_ethbackend_proto_rawDesc = string([]byte{
+var file_remote_ethbackend_proto_rawDesc = []byte{
0x0a, 0x17, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2f, 0x65, 0x74, 0x68, 0x62, 0x61, 0x63, 0x6b,
0x65, 0x6e, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x74,
0x65, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
@@ -1955,16 +1954,16 @@ var file_remote_ethbackend_proto_rawDesc = string([]byte{
0x70, 0x6c, 0x79, 0x42, 0x16, 0x5a, 0x14, 0x2e, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x3b,
0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x33,
-})
+}
var (
file_remote_ethbackend_proto_rawDescOnce sync.Once
- file_remote_ethbackend_proto_rawDescData []byte
+ file_remote_ethbackend_proto_rawDescData = file_remote_ethbackend_proto_rawDesc
)
func file_remote_ethbackend_proto_rawDescGZIP() []byte {
file_remote_ethbackend_proto_rawDescOnce.Do(func() {
- file_remote_ethbackend_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_remote_ethbackend_proto_rawDesc), len(file_remote_ethbackend_proto_rawDesc)))
+ file_remote_ethbackend_proto_rawDescData = protoimpl.X.CompressGZIP(file_remote_ethbackend_proto_rawDescData)
})
return file_remote_ethbackend_proto_rawDescData
}
@@ -2094,7 +2093,7 @@ func file_remote_ethbackend_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: unsafe.Slice(unsafe.StringData(file_remote_ethbackend_proto_rawDesc), len(file_remote_ethbackend_proto_rawDesc)),
+ RawDescriptor: file_remote_ethbackend_proto_rawDesc,
NumEnums: 1,
NumMessages: 34,
NumExtensions: 0,
@@ -2106,6 +2105,7 @@ func file_remote_ethbackend_proto_init() {
MessageInfos: file_remote_ethbackend_proto_msgTypes,
}.Build()
File_remote_ethbackend_proto = out.File
+ file_remote_ethbackend_proto_rawDesc = nil
file_remote_ethbackend_proto_goTypes = nil
file_remote_ethbackend_proto_depIdxs = nil
}
diff --git a/erigon-lib/gointerfaces/remoteproto/kv.pb.go b/erigon-lib/gointerfaces/remoteproto/kv.pb.go
index 417ce152cdc..1239ed57759 100644
--- a/erigon-lib/gointerfaces/remoteproto/kv.pb.go
+++ b/erigon-lib/gointerfaces/remoteproto/kv.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.36.4
+// protoc-gen-go v1.36.3
// protoc v5.29.3
// source: remote/kv.proto
@@ -13,7 +13,6 @@ import (
emptypb "google.golang.org/protobuf/types/known/emptypb"
reflect "reflect"
sync "sync"
- unsafe "unsafe"
)
const (
@@ -918,6 +917,104 @@ func (x *RangeReq) GetPageToken() string {
return ""
}
+// `kv.Sequence` method
+type SequenceReq struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ TxId uint64 `protobuf:"varint,1,opt,name=tx_id,json=txId,proto3" json:"tx_id,omitempty"` // returned by .Tx()
+ // query params
+ Table string `protobuf:"bytes,2,opt,name=table,proto3" json:"table,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *SequenceReq) Reset() {
+ *x = SequenceReq{}
+ mi := &file_remote_kv_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SequenceReq) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SequenceReq) ProtoMessage() {}
+
+func (x *SequenceReq) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_kv_proto_msgTypes[10]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SequenceReq.ProtoReflect.Descriptor instead.
+func (*SequenceReq) Descriptor() ([]byte, []int) {
+ return file_remote_kv_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *SequenceReq) GetTxId() uint64 {
+ if x != nil {
+ return x.TxId
+ }
+ return 0
+}
+
+func (x *SequenceReq) GetTable() string {
+ if x != nil {
+ return x.Table
+ }
+ return ""
+}
+
+type SequenceReply struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *SequenceReply) Reset() {
+ *x = SequenceReply{}
+ mi := &file_remote_kv_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SequenceReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SequenceReply) ProtoMessage() {}
+
+func (x *SequenceReply) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_kv_proto_msgTypes[11]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SequenceReply.ProtoReflect.Descriptor instead.
+func (*SequenceReply) Descriptor() ([]byte, []int) {
+ return file_remote_kv_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *SequenceReply) GetValue() uint64 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+
// Temporal methods
type GetLatestReq struct {
state protoimpl.MessageState `protogen:"open.v1"`
@@ -934,7 +1031,7 @@ type GetLatestReq struct {
func (x *GetLatestReq) Reset() {
*x = GetLatestReq{}
- mi := &file_remote_kv_proto_msgTypes[10]
+ mi := &file_remote_kv_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -946,7 +1043,7 @@ func (x *GetLatestReq) String() string {
func (*GetLatestReq) ProtoMessage() {}
func (x *GetLatestReq) ProtoReflect() protoreflect.Message {
- mi := &file_remote_kv_proto_msgTypes[10]
+ mi := &file_remote_kv_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -959,7 +1056,7 @@ func (x *GetLatestReq) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetLatestReq.ProtoReflect.Descriptor instead.
func (*GetLatestReq) Descriptor() ([]byte, []int) {
- return file_remote_kv_proto_rawDescGZIP(), []int{10}
+ return file_remote_kv_proto_rawDescGZIP(), []int{12}
}
func (x *GetLatestReq) GetTxId() uint64 {
@@ -1014,7 +1111,7 @@ type GetLatestReply struct {
func (x *GetLatestReply) Reset() {
*x = GetLatestReply{}
- mi := &file_remote_kv_proto_msgTypes[11]
+ mi := &file_remote_kv_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1026,7 +1123,7 @@ func (x *GetLatestReply) String() string {
func (*GetLatestReply) ProtoMessage() {}
func (x *GetLatestReply) ProtoReflect() protoreflect.Message {
- mi := &file_remote_kv_proto_msgTypes[11]
+ mi := &file_remote_kv_proto_msgTypes[13]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1039,7 +1136,7 @@ func (x *GetLatestReply) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetLatestReply.ProtoReflect.Descriptor instead.
func (*GetLatestReply) Descriptor() ([]byte, []int) {
- return file_remote_kv_proto_rawDescGZIP(), []int{11}
+ return file_remote_kv_proto_rawDescGZIP(), []int{13}
}
func (x *GetLatestReply) GetV() []byte {
@@ -1068,7 +1165,7 @@ type HistorySeekReq struct {
func (x *HistorySeekReq) Reset() {
*x = HistorySeekReq{}
- mi := &file_remote_kv_proto_msgTypes[12]
+ mi := &file_remote_kv_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1080,7 +1177,7 @@ func (x *HistorySeekReq) String() string {
func (*HistorySeekReq) ProtoMessage() {}
func (x *HistorySeekReq) ProtoReflect() protoreflect.Message {
- mi := &file_remote_kv_proto_msgTypes[12]
+ mi := &file_remote_kv_proto_msgTypes[14]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1093,7 +1190,7 @@ func (x *HistorySeekReq) ProtoReflect() protoreflect.Message {
// Deprecated: Use HistorySeekReq.ProtoReflect.Descriptor instead.
func (*HistorySeekReq) Descriptor() ([]byte, []int) {
- return file_remote_kv_proto_rawDescGZIP(), []int{12}
+ return file_remote_kv_proto_rawDescGZIP(), []int{14}
}
func (x *HistorySeekReq) GetTxId() uint64 {
@@ -1134,7 +1231,7 @@ type HistorySeekReply struct {
func (x *HistorySeekReply) Reset() {
*x = HistorySeekReply{}
- mi := &file_remote_kv_proto_msgTypes[13]
+ mi := &file_remote_kv_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1146,7 +1243,7 @@ func (x *HistorySeekReply) String() string {
func (*HistorySeekReply) ProtoMessage() {}
func (x *HistorySeekReply) ProtoReflect() protoreflect.Message {
- mi := &file_remote_kv_proto_msgTypes[13]
+ mi := &file_remote_kv_proto_msgTypes[15]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1159,7 +1256,7 @@ func (x *HistorySeekReply) ProtoReflect() protoreflect.Message {
// Deprecated: Use HistorySeekReply.ProtoReflect.Descriptor instead.
func (*HistorySeekReply) Descriptor() ([]byte, []int) {
- return file_remote_kv_proto_rawDescGZIP(), []int{13}
+ return file_remote_kv_proto_rawDescGZIP(), []int{15}
}
func (x *HistorySeekReply) GetV() []byte {
@@ -1195,7 +1292,7 @@ type IndexRangeReq struct {
func (x *IndexRangeReq) Reset() {
*x = IndexRangeReq{}
- mi := &file_remote_kv_proto_msgTypes[14]
+ mi := &file_remote_kv_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1207,7 +1304,7 @@ func (x *IndexRangeReq) String() string {
func (*IndexRangeReq) ProtoMessage() {}
func (x *IndexRangeReq) ProtoReflect() protoreflect.Message {
- mi := &file_remote_kv_proto_msgTypes[14]
+ mi := &file_remote_kv_proto_msgTypes[16]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1220,7 +1317,7 @@ func (x *IndexRangeReq) ProtoReflect() protoreflect.Message {
// Deprecated: Use IndexRangeReq.ProtoReflect.Descriptor instead.
func (*IndexRangeReq) Descriptor() ([]byte, []int) {
- return file_remote_kv_proto_rawDescGZIP(), []int{14}
+ return file_remote_kv_proto_rawDescGZIP(), []int{16}
}
func (x *IndexRangeReq) GetTxId() uint64 {
@@ -1296,7 +1393,7 @@ type IndexRangeReply struct {
func (x *IndexRangeReply) Reset() {
*x = IndexRangeReply{}
- mi := &file_remote_kv_proto_msgTypes[15]
+ mi := &file_remote_kv_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1308,7 +1405,7 @@ func (x *IndexRangeReply) String() string {
func (*IndexRangeReply) ProtoMessage() {}
func (x *IndexRangeReply) ProtoReflect() protoreflect.Message {
- mi := &file_remote_kv_proto_msgTypes[15]
+ mi := &file_remote_kv_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1321,7 +1418,7 @@ func (x *IndexRangeReply) ProtoReflect() protoreflect.Message {
// Deprecated: Use IndexRangeReply.ProtoReflect.Descriptor instead.
func (*IndexRangeReply) Descriptor() ([]byte, []int) {
- return file_remote_kv_proto_rawDescGZIP(), []int{15}
+ return file_remote_kv_proto_rawDescGZIP(), []int{17}
}
func (x *IndexRangeReply) GetTimestamps() []uint64 {
@@ -1356,7 +1453,7 @@ type HistoryRangeReq struct {
func (x *HistoryRangeReq) Reset() {
*x = HistoryRangeReq{}
- mi := &file_remote_kv_proto_msgTypes[16]
+ mi := &file_remote_kv_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1368,7 +1465,7 @@ func (x *HistoryRangeReq) String() string {
func (*HistoryRangeReq) ProtoMessage() {}
func (x *HistoryRangeReq) ProtoReflect() protoreflect.Message {
- mi := &file_remote_kv_proto_msgTypes[16]
+ mi := &file_remote_kv_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1381,7 +1478,7 @@ func (x *HistoryRangeReq) ProtoReflect() protoreflect.Message {
// Deprecated: Use HistoryRangeReq.ProtoReflect.Descriptor instead.
func (*HistoryRangeReq) Descriptor() ([]byte, []int) {
- return file_remote_kv_proto_rawDescGZIP(), []int{16}
+ return file_remote_kv_proto_rawDescGZIP(), []int{18}
}
func (x *HistoryRangeReq) GetTxId() uint64 {
@@ -1460,7 +1557,7 @@ type RangeAsOfReq struct {
func (x *RangeAsOfReq) Reset() {
*x = RangeAsOfReq{}
- mi := &file_remote_kv_proto_msgTypes[17]
+ mi := &file_remote_kv_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1472,7 +1569,7 @@ func (x *RangeAsOfReq) String() string {
func (*RangeAsOfReq) ProtoMessage() {}
func (x *RangeAsOfReq) ProtoReflect() protoreflect.Message {
- mi := &file_remote_kv_proto_msgTypes[17]
+ mi := &file_remote_kv_proto_msgTypes[19]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1485,7 +1582,7 @@ func (x *RangeAsOfReq) ProtoReflect() protoreflect.Message {
// Deprecated: Use RangeAsOfReq.ProtoReflect.Descriptor instead.
func (*RangeAsOfReq) Descriptor() ([]byte, []int) {
- return file_remote_kv_proto_rawDescGZIP(), []int{17}
+ return file_remote_kv_proto_rawDescGZIP(), []int{19}
}
func (x *RangeAsOfReq) GetTxId() uint64 {
@@ -1569,7 +1666,7 @@ type Pairs struct {
func (x *Pairs) Reset() {
*x = Pairs{}
- mi := &file_remote_kv_proto_msgTypes[18]
+ mi := &file_remote_kv_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1581,7 +1678,7 @@ func (x *Pairs) String() string {
func (*Pairs) ProtoMessage() {}
func (x *Pairs) ProtoReflect() protoreflect.Message {
- mi := &file_remote_kv_proto_msgTypes[18]
+ mi := &file_remote_kv_proto_msgTypes[20]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1594,7 +1691,7 @@ func (x *Pairs) ProtoReflect() protoreflect.Message {
// Deprecated: Use Pairs.ProtoReflect.Descriptor instead.
func (*Pairs) Descriptor() ([]byte, []int) {
- return file_remote_kv_proto_rawDescGZIP(), []int{18}
+ return file_remote_kv_proto_rawDescGZIP(), []int{20}
}
func (x *Pairs) GetKeys() [][]byte {
@@ -1628,7 +1725,7 @@ type PairsPagination struct {
func (x *PairsPagination) Reset() {
*x = PairsPagination{}
- mi := &file_remote_kv_proto_msgTypes[19]
+ mi := &file_remote_kv_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1640,7 +1737,7 @@ func (x *PairsPagination) String() string {
func (*PairsPagination) ProtoMessage() {}
func (x *PairsPagination) ProtoReflect() protoreflect.Message {
- mi := &file_remote_kv_proto_msgTypes[19]
+ mi := &file_remote_kv_proto_msgTypes[21]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1653,7 +1750,7 @@ func (x *PairsPagination) ProtoReflect() protoreflect.Message {
// Deprecated: Use PairsPagination.ProtoReflect.Descriptor instead.
func (*PairsPagination) Descriptor() ([]byte, []int) {
- return file_remote_kv_proto_rawDescGZIP(), []int{19}
+ return file_remote_kv_proto_rawDescGZIP(), []int{21}
}
func (x *PairsPagination) GetNextKey() []byte {
@@ -1680,7 +1777,7 @@ type IndexPagination struct {
func (x *IndexPagination) Reset() {
*x = IndexPagination{}
- mi := &file_remote_kv_proto_msgTypes[20]
+ mi := &file_remote_kv_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1692,7 +1789,7 @@ func (x *IndexPagination) String() string {
func (*IndexPagination) ProtoMessage() {}
func (x *IndexPagination) ProtoReflect() protoreflect.Message {
- mi := &file_remote_kv_proto_msgTypes[20]
+ mi := &file_remote_kv_proto_msgTypes[22]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1705,7 +1802,7 @@ func (x *IndexPagination) ProtoReflect() protoreflect.Message {
// Deprecated: Use IndexPagination.ProtoReflect.Descriptor instead.
func (*IndexPagination) Descriptor() ([]byte, []int) {
- return file_remote_kv_proto_rawDescGZIP(), []int{20}
+ return file_remote_kv_proto_rawDescGZIP(), []int{22}
}
func (x *IndexPagination) GetNextTimeStamp() int64 {
@@ -1724,7 +1821,7 @@ func (x *IndexPagination) GetLimit() int64 {
var File_remote_kv_proto protoreflect.FileDescriptor
-var file_remote_kv_proto_rawDesc = string([]byte{
+var file_remote_kv_proto_rawDesc = []byte{
0x0a, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2f, 0x6b, 0x76, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x12, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79,
@@ -1826,170 +1923,179 @@ var file_remote_kv_proto_rawDesc = string([]byte{
0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53,
0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65,
0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b,
- 0x65, 0x6e, 0x22, 0x7f, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x52,
- 0x65, 0x71, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x04, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x0c, 0x0a,
- 0x01, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x6b, 0x12, 0x0e, 0x0a, 0x02, 0x74,
- 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x74, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x6b,
- 0x32, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x6b, 0x32, 0x12, 0x16, 0x0a, 0x06, 0x6c,
- 0x61, 0x74, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x6c, 0x61, 0x74,
- 0x65, 0x73, 0x74, 0x22, 0x2e, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74,
- 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x0c, 0x0a, 0x01, 0x76, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c,
- 0x52, 0x01, 0x76, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52,
- 0x02, 0x6f, 0x6b, 0x22, 0x59, 0x0a, 0x0e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x53, 0x65,
- 0x65, 0x6b, 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61,
- 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x12, 0x0c, 0x0a, 0x01, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x6b, 0x12, 0x0e,
- 0x0a, 0x02, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x74, 0x73, 0x22, 0x30,
- 0x0a, 0x10, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x53, 0x65, 0x65, 0x6b, 0x52, 0x65, 0x70,
- 0x6c, 0x79, 0x12, 0x0c, 0x0a, 0x01, 0x76, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x76,
- 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x6f, 0x6b,
- 0x22, 0xeb, 0x01, 0x0a, 0x0d, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52,
- 0x65, 0x71, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x04, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x0c, 0x0a,
- 0x01, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x6b, 0x12, 0x17, 0x0a, 0x07, 0x66,
- 0x72, 0x6f, 0x6d, 0x5f, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x12, 0x52, 0x06, 0x66, 0x72,
- 0x6f, 0x6d, 0x54, 0x73, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x6f, 0x5f, 0x74, 0x73, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x12, 0x52, 0x04, 0x74, 0x6f, 0x54, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x72, 0x64,
- 0x65, 0x72, 0x5f, 0x61, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52,
- 0x0b, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05,
- 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d,
- 0x69, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18,
- 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12,
- 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x09, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x59,
- 0x0a, 0x0f, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x70, 0x6c,
- 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x18,
- 0x01, 0x20, 0x03, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
- 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74,
- 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74,
- 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xdf, 0x01, 0x0a, 0x0f, 0x48, 0x69,
- 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a,
+ 0x65, 0x6e, 0x22, 0x38, 0x0a, 0x0b, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65,
+ 0x71, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x25, 0x0a, 0x0d,
+ 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x14, 0x0a,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x22, 0x7f, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74,
+ 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x0c,
+ 0x0a, 0x01, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x6b, 0x12, 0x0e, 0x0a, 0x02,
+ 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x74, 0x73, 0x12, 0x0e, 0x0a, 0x02,
+ 0x6b, 0x32, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x6b, 0x32, 0x12, 0x16, 0x0a, 0x06,
+ 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x6c, 0x61,
+ 0x74, 0x65, 0x73, 0x74, 0x22, 0x2e, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73,
+ 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x0c, 0x0a, 0x01, 0x76, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0c, 0x52, 0x01, 0x76, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x02, 0x6f, 0x6b, 0x22, 0x59, 0x0a, 0x0e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x53,
+ 0x65, 0x65, 0x6b, 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74,
+ 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x12, 0x0c, 0x0a, 0x01, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x6b, 0x12,
+ 0x0e, 0x0a, 0x02, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x74, 0x73, 0x22,
+ 0x30, 0x0a, 0x10, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x53, 0x65, 0x65, 0x6b, 0x52, 0x65,
+ 0x70, 0x6c, 0x79, 0x12, 0x0c, 0x0a, 0x01, 0x76, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01,
+ 0x76, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x6f,
+ 0x6b, 0x22, 0xeb, 0x01, 0x0a, 0x0d, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65,
+ 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x0c,
+ 0x0a, 0x01, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x6b, 0x12, 0x17, 0x0a, 0x07,
+ 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x12, 0x52, 0x06, 0x66,
+ 0x72, 0x6f, 0x6d, 0x54, 0x73, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x6f, 0x5f, 0x74, 0x73, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x12, 0x52, 0x04, 0x74, 0x6f, 0x54, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x72,
+ 0x64, 0x65, 0x72, 0x5f, 0x61, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x0b, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x12, 0x14, 0x0a,
+ 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69,
+ 0x6d, 0x69, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65,
+ 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65,
+ 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x09,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22,
+ 0x59, 0x0a, 0x0f, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x70,
+ 0x6c, 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
+ 0x70, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f,
+ 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78,
+ 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xdf, 0x01, 0x0a, 0x0f, 0x48,
+ 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x12, 0x13,
+ 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74,
+ 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x72, 0x6f,
+ 0x6d, 0x5f, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x12, 0x52, 0x06, 0x66, 0x72, 0x6f, 0x6d,
+ 0x54, 0x73, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x6f, 0x5f, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x12, 0x52, 0x04, 0x74, 0x6f, 0x54, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x72, 0x64, 0x65, 0x72,
+ 0x5f, 0x61, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6f,
+ 0x72, 0x64, 0x65, 0x72, 0x41, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69,
+ 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74,
+ 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20,
+ 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a,
+ 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x88, 0x02, 0x0a,
+ 0x0c, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x41, 0x73, 0x4f, 0x66, 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a,
0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, 0x78,
0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x72, 0x6f, 0x6d,
- 0x5f, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x12, 0x52, 0x06, 0x66, 0x72, 0x6f, 0x6d, 0x54,
- 0x73, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x6f, 0x5f, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x12,
- 0x52, 0x04, 0x74, 0x6f, 0x54, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f,
- 0x61, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6f, 0x72,
- 0x64, 0x65, 0x72, 0x41, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d,
- 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12,
- 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01,
- 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a,
- 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x88, 0x02, 0x0a, 0x0c,
- 0x52, 0x61, 0x6e, 0x67, 0x65, 0x41, 0x73, 0x4f, 0x66, 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a, 0x05,
- 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, 0x78, 0x49,
- 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x66, 0x72, 0x6f, 0x6d, 0x5f,
- 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x66, 0x72, 0x6f, 0x6d, 0x4b,
- 0x65, 0x79, 0x12, 0x15, 0x0a, 0x06, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01,
- 0x28, 0x0c, 0x52, 0x05, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x74, 0x73, 0x18,
- 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x74,
- 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x6c, 0x61, 0x74, 0x65, 0x73,
- 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x73, 0x63, 0x65, 0x6e,
- 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x73,
- 0x63, 0x65, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x08, 0x20,
- 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61,
- 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70,
- 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f,
- 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67,
- 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x5b, 0x0a, 0x05, 0x50, 0x61, 0x69, 0x72, 0x73, 0x12,
- 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x04, 0x6b,
- 0x65, 0x79, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02, 0x20,
- 0x03, 0x28, 0x0c, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e,
- 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f,
- 0x6b, 0x65, 0x6e, 0x22, 0x42, 0x0a, 0x0f, 0x50, 0x61, 0x69, 0x72, 0x73, 0x50, 0x61, 0x67, 0x69,
- 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x6b,
- 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6e, 0x65, 0x78, 0x74, 0x4b, 0x65,
- 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x12,
- 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x4f, 0x0a, 0x0f, 0x49, 0x6e, 0x64, 0x65, 0x78,
- 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65,
- 0x78, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x12, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x74, 0x61,
- 0x6d, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x2a, 0xfb, 0x01, 0x0a, 0x02, 0x4f, 0x70, 0x12,
- 0x09, 0x0a, 0x05, 0x46, 0x49, 0x52, 0x53, 0x54, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x46, 0x49,
- 0x52, 0x53, 0x54, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x45, 0x45,
- 0x4b, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x45, 0x45, 0x4b, 0x5f, 0x42, 0x4f, 0x54, 0x48,
- 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x55, 0x52, 0x52, 0x45, 0x4e, 0x54, 0x10, 0x04, 0x12,
- 0x08, 0x0a, 0x04, 0x4c, 0x41, 0x53, 0x54, 0x10, 0x06, 0x12, 0x0c, 0x0a, 0x08, 0x4c, 0x41, 0x53,
- 0x54, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x07, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x45, 0x58, 0x54, 0x10,
- 0x08, 0x12, 0x0c, 0x0a, 0x08, 0x4e, 0x45, 0x58, 0x54, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x09, 0x12,
- 0x0f, 0x0a, 0x0b, 0x4e, 0x45, 0x58, 0x54, 0x5f, 0x4e, 0x4f, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x0b,
- 0x12, 0x08, 0x0a, 0x04, 0x50, 0x52, 0x45, 0x56, 0x10, 0x0c, 0x12, 0x0c, 0x0a, 0x08, 0x50, 0x52,
- 0x45, 0x56, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x0d, 0x12, 0x0f, 0x0a, 0x0b, 0x50, 0x52, 0x45, 0x56,
- 0x5f, 0x4e, 0x4f, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x0e, 0x12, 0x0e, 0x0a, 0x0a, 0x53, 0x45, 0x45,
- 0x4b, 0x5f, 0x45, 0x58, 0x41, 0x43, 0x54, 0x10, 0x0f, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x45,
- 0x4b, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x5f, 0x45, 0x58, 0x41, 0x43, 0x54, 0x10, 0x10, 0x12, 0x08,
- 0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x10, 0x1e, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x53,
- 0x45, 0x10, 0x1f, 0x12, 0x11, 0x0a, 0x0d, 0x4f, 0x50, 0x45, 0x4e, 0x5f, 0x44, 0x55, 0x50, 0x5f,
- 0x53, 0x4f, 0x52, 0x54, 0x10, 0x20, 0x2a, 0x48, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x54, 0x4f, 0x52, 0x41, 0x47, 0x45, 0x10, 0x00, 0x12, 0x0a, 0x0a,
- 0x06, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x44,
- 0x45, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, 0x5f, 0x43, 0x4f,
- 0x44, 0x45, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x10, 0x04,
- 0x2a, 0x24, 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a,
- 0x07, 0x46, 0x4f, 0x52, 0x57, 0x41, 0x52, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x4e,
- 0x57, 0x49, 0x4e, 0x44, 0x10, 0x01, 0x32, 0xb9, 0x04, 0x0a, 0x02, 0x4b, 0x56, 0x12, 0x36, 0x0a,
- 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79,
- 0x1a, 0x13, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
- 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, 0x02, 0x54, 0x78, 0x12, 0x0e, 0x2e, 0x72, 0x65,
- 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x43, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x1a, 0x0c, 0x2e, 0x72, 0x65,
- 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69, 0x72, 0x28, 0x01, 0x30, 0x01, 0x12, 0x46, 0x0a,
- 0x0c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x1a, 0x2e,
- 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e,
- 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x72, 0x65, 0x6d, 0x6f,
- 0x74, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x61,
- 0x74, 0x63, 0x68, 0x30, 0x01, 0x12, 0x3d, 0x0a, 0x09, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f,
- 0x74, 0x73, 0x12, 0x18, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x6e, 0x61, 0x70,
- 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x72,
- 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52,
- 0x65, 0x70, 0x6c, 0x79, 0x12, 0x28, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x10, 0x2e,
- 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x1a,
- 0x0d, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69, 0x72, 0x73, 0x12, 0x39,
- 0x0a, 0x09, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x12, 0x14, 0x2e, 0x72, 0x65,
- 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x52, 0x65,
- 0x71, 0x1a, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x61,
- 0x74, 0x65, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3f, 0x0a, 0x0b, 0x48, 0x69, 0x73,
- 0x74, 0x6f, 0x72, 0x79, 0x53, 0x65, 0x65, 0x6b, 0x12, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74,
- 0x65, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x53, 0x65, 0x65, 0x6b, 0x52, 0x65, 0x71,
- 0x1a, 0x18, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72,
- 0x79, 0x53, 0x65, 0x65, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3c, 0x0a, 0x0a, 0x49, 0x6e,
- 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74,
- 0x65, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x1a,
- 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61,
- 0x6e, 0x67, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x36, 0x0a, 0x0c, 0x48, 0x69, 0x73, 0x74,
- 0x6f, 0x72, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74,
- 0x65, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65,
- 0x71, 0x1a, 0x0d, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69, 0x72, 0x73,
- 0x12, 0x30, 0x0a, 0x09, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x41, 0x73, 0x4f, 0x66, 0x12, 0x14, 0x2e,
- 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x41, 0x73, 0x4f, 0x66,
- 0x52, 0x65, 0x71, 0x1a, 0x0d, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69,
- 0x72, 0x73, 0x42, 0x16, 0x5a, 0x14, 0x2e, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x3b, 0x72,
- 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x33,
-})
+ 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x66, 0x72, 0x6f, 0x6d,
+ 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x66, 0x72, 0x6f, 0x6d,
+ 0x4b, 0x65, 0x79, 0x12, 0x15, 0x0a, 0x06, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0c, 0x52, 0x05, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x74, 0x73,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61,
+ 0x74, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x6c, 0x61, 0x74, 0x65,
+ 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x73, 0x63, 0x65,
+ 0x6e, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x41,
+ 0x73, 0x63, 0x65, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x08,
+ 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70,
+ 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08,
+ 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65,
+ 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61,
+ 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x5b, 0x0a, 0x05, 0x50, 0x61, 0x69, 0x72, 0x73,
+ 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x04,
+ 0x6b, 0x65, 0x79, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f,
+ 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x42, 0x0a, 0x0f, 0x50, 0x61, 0x69, 0x72, 0x73, 0x50, 0x61, 0x67,
+ 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x78, 0x74, 0x5f,
+ 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6e, 0x65, 0x78, 0x74, 0x4b,
+ 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x4f, 0x0a, 0x0f, 0x49, 0x6e, 0x64, 0x65,
+ 0x78, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x6e,
+ 0x65, 0x78, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x12, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x74,
+ 0x61, 0x6d, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x2a, 0xfb, 0x01, 0x0a, 0x02, 0x4f, 0x70,
+ 0x12, 0x09, 0x0a, 0x05, 0x46, 0x49, 0x52, 0x53, 0x54, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x46,
+ 0x49, 0x52, 0x53, 0x54, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x45,
+ 0x45, 0x4b, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x45, 0x45, 0x4b, 0x5f, 0x42, 0x4f, 0x54,
+ 0x48, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x55, 0x52, 0x52, 0x45, 0x4e, 0x54, 0x10, 0x04,
+ 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x41, 0x53, 0x54, 0x10, 0x06, 0x12, 0x0c, 0x0a, 0x08, 0x4c, 0x41,
+ 0x53, 0x54, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x07, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x45, 0x58, 0x54,
+ 0x10, 0x08, 0x12, 0x0c, 0x0a, 0x08, 0x4e, 0x45, 0x58, 0x54, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x09,
+ 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x45, 0x58, 0x54, 0x5f, 0x4e, 0x4f, 0x5f, 0x44, 0x55, 0x50, 0x10,
+ 0x0b, 0x12, 0x08, 0x0a, 0x04, 0x50, 0x52, 0x45, 0x56, 0x10, 0x0c, 0x12, 0x0c, 0x0a, 0x08, 0x50,
+ 0x52, 0x45, 0x56, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x0d, 0x12, 0x0f, 0x0a, 0x0b, 0x50, 0x52, 0x45,
+ 0x56, 0x5f, 0x4e, 0x4f, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x0e, 0x12, 0x0e, 0x0a, 0x0a, 0x53, 0x45,
+ 0x45, 0x4b, 0x5f, 0x45, 0x58, 0x41, 0x43, 0x54, 0x10, 0x0f, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45,
+ 0x45, 0x4b, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x5f, 0x45, 0x58, 0x41, 0x43, 0x54, 0x10, 0x10, 0x12,
+ 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x10, 0x1e, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f,
+ 0x53, 0x45, 0x10, 0x1f, 0x12, 0x11, 0x0a, 0x0d, 0x4f, 0x50, 0x45, 0x4e, 0x5f, 0x44, 0x55, 0x50,
+ 0x5f, 0x53, 0x4f, 0x52, 0x54, 0x10, 0x20, 0x2a, 0x48, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x54, 0x4f, 0x52, 0x41, 0x47, 0x45, 0x10, 0x00, 0x12, 0x0a,
+ 0x0a, 0x06, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f,
+ 0x44, 0x45, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, 0x5f, 0x43,
+ 0x4f, 0x44, 0x45, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x10,
+ 0x04, 0x2a, 0x24, 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b,
+ 0x0a, 0x07, 0x46, 0x4f, 0x52, 0x57, 0x41, 0x52, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x55,
+ 0x4e, 0x57, 0x49, 0x4e, 0x44, 0x10, 0x01, 0x32, 0xf1, 0x04, 0x0a, 0x02, 0x4b, 0x56, 0x12, 0x36,
+ 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74,
+ 0x79, 0x1a, 0x13, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, 0x02, 0x54, 0x78, 0x12, 0x0e, 0x2e, 0x72,
+ 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x43, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x1a, 0x0c, 0x2e, 0x72,
+ 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69, 0x72, 0x28, 0x01, 0x30, 0x01, 0x12, 0x46,
+ 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x1a,
+ 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61,
+ 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x72, 0x65, 0x6d,
+ 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x42,
+ 0x61, 0x74, 0x63, 0x68, 0x30, 0x01, 0x12, 0x3d, 0x0a, 0x09, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68,
+ 0x6f, 0x74, 0x73, 0x12, 0x18, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x6e, 0x61,
+ 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e,
+ 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73,
+ 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x28, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x10,
+ 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71,
+ 0x1a, 0x0d, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69, 0x72, 0x73, 0x12,
+ 0x36, 0x0a, 0x08, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x13, 0x2e, 0x72, 0x65,
+ 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71,
+ 0x1a, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e,
+ 0x63, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x39, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x4c, 0x61,
+ 0x74, 0x65, 0x73, 0x74, 0x12, 0x14, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x47, 0x65,
+ 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x52, 0x65, 0x71, 0x1a, 0x16, 0x2e, 0x72, 0x65, 0x6d,
+ 0x6f, 0x74, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x52, 0x65, 0x70,
+ 0x6c, 0x79, 0x12, 0x3f, 0x0a, 0x0b, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x53, 0x65, 0x65,
+ 0x6b, 0x12, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f,
+ 0x72, 0x79, 0x53, 0x65, 0x65, 0x6b, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x72, 0x65, 0x6d, 0x6f,
+ 0x74, 0x65, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x53, 0x65, 0x65, 0x6b, 0x52, 0x65,
+ 0x70, 0x6c, 0x79, 0x12, 0x3c, 0x0a, 0x0a, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67,
+ 0x65, 0x12, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78,
+ 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74,
+ 0x65, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x70, 0x6c,
+ 0x79, 0x12, 0x36, 0x0a, 0x0c, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x61, 0x6e, 0x67,
+ 0x65, 0x12, 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f,
+ 0x72, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x0d, 0x2e, 0x72, 0x65, 0x6d,
+ 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69, 0x72, 0x73, 0x12, 0x30, 0x0a, 0x09, 0x52, 0x61, 0x6e,
+ 0x67, 0x65, 0x41, 0x73, 0x4f, 0x66, 0x12, 0x14, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e,
+ 0x52, 0x61, 0x6e, 0x67, 0x65, 0x41, 0x73, 0x4f, 0x66, 0x52, 0x65, 0x71, 0x1a, 0x0d, 0x2e, 0x72,
+ 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69, 0x72, 0x73, 0x42, 0x16, 0x5a, 0x14, 0x2e,
+ 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x3b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
var (
file_remote_kv_proto_rawDescOnce sync.Once
- file_remote_kv_proto_rawDescData []byte
+ file_remote_kv_proto_rawDescData = file_remote_kv_proto_rawDesc
)
func file_remote_kv_proto_rawDescGZIP() []byte {
file_remote_kv_proto_rawDescOnce.Do(func() {
- file_remote_kv_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_remote_kv_proto_rawDesc), len(file_remote_kv_proto_rawDesc)))
+ file_remote_kv_proto_rawDescData = protoimpl.X.CompressGZIP(file_remote_kv_proto_rawDescData)
})
return file_remote_kv_proto_rawDescData
}
var file_remote_kv_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
-var file_remote_kv_proto_msgTypes = make([]protoimpl.MessageInfo, 21)
+var file_remote_kv_proto_msgTypes = make([]protoimpl.MessageInfo, 23)
var file_remote_kv_proto_goTypes = []any{
(Op)(0), // 0: remote.Op
(Action)(0), // 1: remote.Action
@@ -2004,54 +2110,58 @@ var file_remote_kv_proto_goTypes = []any{
(*SnapshotsRequest)(nil), // 10: remote.SnapshotsRequest
(*SnapshotsReply)(nil), // 11: remote.SnapshotsReply
(*RangeReq)(nil), // 12: remote.RangeReq
- (*GetLatestReq)(nil), // 13: remote.GetLatestReq
- (*GetLatestReply)(nil), // 14: remote.GetLatestReply
- (*HistorySeekReq)(nil), // 15: remote.HistorySeekReq
- (*HistorySeekReply)(nil), // 16: remote.HistorySeekReply
- (*IndexRangeReq)(nil), // 17: remote.IndexRangeReq
- (*IndexRangeReply)(nil), // 18: remote.IndexRangeReply
- (*HistoryRangeReq)(nil), // 19: remote.HistoryRangeReq
- (*RangeAsOfReq)(nil), // 20: remote.RangeAsOfReq
- (*Pairs)(nil), // 21: remote.Pairs
- (*PairsPagination)(nil), // 22: remote.PairsPagination
- (*IndexPagination)(nil), // 23: remote.IndexPagination
- (*typesproto.H256)(nil), // 24: types.H256
- (*typesproto.H160)(nil), // 25: types.H160
- (*emptypb.Empty)(nil), // 26: google.protobuf.Empty
- (*typesproto.VersionReply)(nil), // 27: types.VersionReply
+ (*SequenceReq)(nil), // 13: remote.SequenceReq
+ (*SequenceReply)(nil), // 14: remote.SequenceReply
+ (*GetLatestReq)(nil), // 15: remote.GetLatestReq
+ (*GetLatestReply)(nil), // 16: remote.GetLatestReply
+ (*HistorySeekReq)(nil), // 17: remote.HistorySeekReq
+ (*HistorySeekReply)(nil), // 18: remote.HistorySeekReply
+ (*IndexRangeReq)(nil), // 19: remote.IndexRangeReq
+ (*IndexRangeReply)(nil), // 20: remote.IndexRangeReply
+ (*HistoryRangeReq)(nil), // 21: remote.HistoryRangeReq
+ (*RangeAsOfReq)(nil), // 22: remote.RangeAsOfReq
+ (*Pairs)(nil), // 23: remote.Pairs
+ (*PairsPagination)(nil), // 24: remote.PairsPagination
+ (*IndexPagination)(nil), // 25: remote.IndexPagination
+ (*typesproto.H256)(nil), // 26: types.H256
+ (*typesproto.H160)(nil), // 27: types.H160
+ (*emptypb.Empty)(nil), // 28: google.protobuf.Empty
+ (*typesproto.VersionReply)(nil), // 29: types.VersionReply
}
var file_remote_kv_proto_depIdxs = []int32{
0, // 0: remote.Cursor.op:type_name -> remote.Op
- 24, // 1: remote.StorageChange.location:type_name -> types.H256
- 25, // 2: remote.AccountChange.address:type_name -> types.H160
+ 26, // 1: remote.StorageChange.location:type_name -> types.H256
+ 27, // 2: remote.AccountChange.address:type_name -> types.H160
1, // 3: remote.AccountChange.action:type_name -> remote.Action
5, // 4: remote.AccountChange.storage_changes:type_name -> remote.StorageChange
8, // 5: remote.StateChangeBatch.change_batch:type_name -> remote.StateChange
2, // 6: remote.StateChange.direction:type_name -> remote.Direction
- 24, // 7: remote.StateChange.block_hash:type_name -> types.H256
+ 26, // 7: remote.StateChange.block_hash:type_name -> types.H256
6, // 8: remote.StateChange.changes:type_name -> remote.AccountChange
- 26, // 9: remote.KV.Version:input_type -> google.protobuf.Empty
+ 28, // 9: remote.KV.Version:input_type -> google.protobuf.Empty
3, // 10: remote.KV.Tx:input_type -> remote.Cursor
9, // 11: remote.KV.StateChanges:input_type -> remote.StateChangeRequest
10, // 12: remote.KV.Snapshots:input_type -> remote.SnapshotsRequest
12, // 13: remote.KV.Range:input_type -> remote.RangeReq
- 13, // 14: remote.KV.GetLatest:input_type -> remote.GetLatestReq
- 15, // 15: remote.KV.HistorySeek:input_type -> remote.HistorySeekReq
- 17, // 16: remote.KV.IndexRange:input_type -> remote.IndexRangeReq
- 19, // 17: remote.KV.HistoryRange:input_type -> remote.HistoryRangeReq
- 20, // 18: remote.KV.RangeAsOf:input_type -> remote.RangeAsOfReq
- 27, // 19: remote.KV.Version:output_type -> types.VersionReply
- 4, // 20: remote.KV.Tx:output_type -> remote.Pair
- 7, // 21: remote.KV.StateChanges:output_type -> remote.StateChangeBatch
- 11, // 22: remote.KV.Snapshots:output_type -> remote.SnapshotsReply
- 21, // 23: remote.KV.Range:output_type -> remote.Pairs
- 14, // 24: remote.KV.GetLatest:output_type -> remote.GetLatestReply
- 16, // 25: remote.KV.HistorySeek:output_type -> remote.HistorySeekReply
- 18, // 26: remote.KV.IndexRange:output_type -> remote.IndexRangeReply
- 21, // 27: remote.KV.HistoryRange:output_type -> remote.Pairs
- 21, // 28: remote.KV.RangeAsOf:output_type -> remote.Pairs
- 19, // [19:29] is the sub-list for method output_type
- 9, // [9:19] is the sub-list for method input_type
+ 13, // 14: remote.KV.Sequence:input_type -> remote.SequenceReq
+ 15, // 15: remote.KV.GetLatest:input_type -> remote.GetLatestReq
+ 17, // 16: remote.KV.HistorySeek:input_type -> remote.HistorySeekReq
+ 19, // 17: remote.KV.IndexRange:input_type -> remote.IndexRangeReq
+ 21, // 18: remote.KV.HistoryRange:input_type -> remote.HistoryRangeReq
+ 22, // 19: remote.KV.RangeAsOf:input_type -> remote.RangeAsOfReq
+ 29, // 20: remote.KV.Version:output_type -> types.VersionReply
+ 4, // 21: remote.KV.Tx:output_type -> remote.Pair
+ 7, // 22: remote.KV.StateChanges:output_type -> remote.StateChangeBatch
+ 11, // 23: remote.KV.Snapshots:output_type -> remote.SnapshotsReply
+ 23, // 24: remote.KV.Range:output_type -> remote.Pairs
+ 14, // 25: remote.KV.Sequence:output_type -> remote.SequenceReply
+ 16, // 26: remote.KV.GetLatest:output_type -> remote.GetLatestReply
+ 18, // 27: remote.KV.HistorySeek:output_type -> remote.HistorySeekReply
+ 20, // 28: remote.KV.IndexRange:output_type -> remote.IndexRangeReply
+ 23, // 29: remote.KV.HistoryRange:output_type -> remote.Pairs
+ 23, // 30: remote.KV.RangeAsOf:output_type -> remote.Pairs
+ 20, // [20:31] is the sub-list for method output_type
+ 9, // [9:20] is the sub-list for method input_type
9, // [9:9] is the sub-list for extension type_name
9, // [9:9] is the sub-list for extension extendee
0, // [0:9] is the sub-list for field type_name
@@ -2066,9 +2176,9 @@ func file_remote_kv_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: unsafe.Slice(unsafe.StringData(file_remote_kv_proto_rawDesc), len(file_remote_kv_proto_rawDesc)),
+ RawDescriptor: file_remote_kv_proto_rawDesc,
NumEnums: 3,
- NumMessages: 21,
+ NumMessages: 23,
NumExtensions: 0,
NumServices: 1,
},
@@ -2078,6 +2188,7 @@ func file_remote_kv_proto_init() {
MessageInfos: file_remote_kv_proto_msgTypes,
}.Build()
File_remote_kv_proto = out.File
+ file_remote_kv_proto_rawDesc = nil
file_remote_kv_proto_goTypes = nil
file_remote_kv_proto_depIdxs = nil
}
diff --git a/erigon-lib/gointerfaces/remoteproto/kv_client_mock.go b/erigon-lib/gointerfaces/remoteproto/kv_client_mock.go
index ec6878eea3a..ea9c953c548 100644
--- a/erigon-lib/gointerfaces/remoteproto/kv_client_mock.go
+++ b/erigon-lib/gointerfaces/remoteproto/kv_client_mock.go
@@ -307,6 +307,50 @@ func (c *MockKVClientRangeAsOfCall) DoAndReturn(f func(context.Context, *RangeAs
return c
}
+// Sequence mocks base method.
+func (m *MockKVClient) Sequence(ctx context.Context, in *SequenceReq, opts ...grpc.CallOption) (*SequenceReply, error) {
+ m.ctrl.T.Helper()
+ varargs := []any{ctx, in}
+ for _, a := range opts {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "Sequence", varargs...)
+ ret0, _ := ret[0].(*SequenceReply)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Sequence indicates an expected call of Sequence.
+func (mr *MockKVClientMockRecorder) Sequence(ctx, in any, opts ...any) *MockKVClientSequenceCall {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]any{ctx, in}, opts...)
+ call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Sequence", reflect.TypeOf((*MockKVClient)(nil).Sequence), varargs...)
+ return &MockKVClientSequenceCall{Call: call}
+}
+
+// MockKVClientSequenceCall wrap *gomock.Call
+type MockKVClientSequenceCall struct {
+ *gomock.Call
+}
+
+// Return rewrite *gomock.Call.Return
+func (c *MockKVClientSequenceCall) Return(arg0 *SequenceReply, arg1 error) *MockKVClientSequenceCall {
+ c.Call = c.Call.Return(arg0, arg1)
+ return c
+}
+
+// Do rewrite *gomock.Call.Do
+func (c *MockKVClientSequenceCall) Do(f func(context.Context, *SequenceReq, ...grpc.CallOption) (*SequenceReply, error)) *MockKVClientSequenceCall {
+ c.Call = c.Call.Do(f)
+ return c
+}
+
+// DoAndReturn rewrite *gomock.Call.DoAndReturn
+func (c *MockKVClientSequenceCall) DoAndReturn(f func(context.Context, *SequenceReq, ...grpc.CallOption) (*SequenceReply, error)) *MockKVClientSequenceCall {
+ c.Call = c.Call.DoAndReturn(f)
+ return c
+}
+
// Snapshots mocks base method.
func (m *MockKVClient) Snapshots(ctx context.Context, in *SnapshotsRequest, opts ...grpc.CallOption) (*SnapshotsReply, error) {
m.ctrl.T.Helper()
diff --git a/erigon-lib/gointerfaces/remoteproto/kv_grpc.pb.go b/erigon-lib/gointerfaces/remoteproto/kv_grpc.pb.go
index 95513469db1..06a578a289f 100644
--- a/erigon-lib/gointerfaces/remoteproto/kv_grpc.pb.go
+++ b/erigon-lib/gointerfaces/remoteproto/kv_grpc.pb.go
@@ -26,6 +26,7 @@ const (
KV_StateChanges_FullMethodName = "/remote.KV/StateChanges"
KV_Snapshots_FullMethodName = "/remote.KV/Snapshots"
KV_Range_FullMethodName = "/remote.KV/Range"
+ KV_Sequence_FullMethodName = "/remote.KV/Sequence"
KV_GetLatest_FullMethodName = "/remote.KV/GetLatest"
KV_HistorySeek_FullMethodName = "/remote.KV/HistorySeek"
KV_IndexRange_FullMethodName = "/remote.KV/IndexRange"
@@ -55,6 +56,7 @@ type KVClient interface {
// Range(nil, to) means [StartOfTable, to)
// If orderAscend=false server expecting `from`<`to`. Example: Range("B", "A")
Range(ctx context.Context, in *RangeReq, opts ...grpc.CallOption) (*Pairs, error)
+ Sequence(ctx context.Context, in *SequenceReq, opts ...grpc.CallOption) (*SequenceReply, error)
// Temporal methods
GetLatest(ctx context.Context, in *GetLatestReq, opts ...grpc.CallOption) (*GetLatestReply, error)
HistorySeek(ctx context.Context, in *HistorySeekReq, opts ...grpc.CallOption) (*HistorySeekReply, error)
@@ -133,6 +135,16 @@ func (c *kVClient) Range(ctx context.Context, in *RangeReq, opts ...grpc.CallOpt
return out, nil
}
+func (c *kVClient) Sequence(ctx context.Context, in *SequenceReq, opts ...grpc.CallOption) (*SequenceReply, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(SequenceReply)
+ err := c.cc.Invoke(ctx, KV_Sequence_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
func (c *kVClient) GetLatest(ctx context.Context, in *GetLatestReq, opts ...grpc.CallOption) (*GetLatestReply, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetLatestReply)
@@ -205,6 +217,7 @@ type KVServer interface {
// Range(nil, to) means [StartOfTable, to)
// If orderAscend=false server expecting `from`<`to`. Example: Range("B", "A")
Range(context.Context, *RangeReq) (*Pairs, error)
+ Sequence(context.Context, *SequenceReq) (*SequenceReply, error)
// Temporal methods
GetLatest(context.Context, *GetLatestReq) (*GetLatestReply, error)
HistorySeek(context.Context, *HistorySeekReq) (*HistorySeekReply, error)
@@ -236,6 +249,9 @@ func (UnimplementedKVServer) Snapshots(context.Context, *SnapshotsRequest) (*Sna
func (UnimplementedKVServer) Range(context.Context, *RangeReq) (*Pairs, error) {
return nil, status.Errorf(codes.Unimplemented, "method Range not implemented")
}
+func (UnimplementedKVServer) Sequence(context.Context, *SequenceReq) (*SequenceReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Sequence not implemented")
+}
func (UnimplementedKVServer) GetLatest(context.Context, *GetLatestReq) (*GetLatestReply, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetLatest not implemented")
}
@@ -344,6 +360,24 @@ func _KV_Range_Handler(srv interface{}, ctx context.Context, dec func(interface{
return interceptor(ctx, in, info, handler)
}
+func _KV_Sequence_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SequenceReq)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(KVServer).Sequence(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: KV_Sequence_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(KVServer).Sequence(ctx, req.(*SequenceReq))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _KV_GetLatest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetLatestReq)
if err := dec(in); err != nil {
@@ -453,6 +487,10 @@ var KV_ServiceDesc = grpc.ServiceDesc{
MethodName: "Range",
Handler: _KV_Range_Handler,
},
+ {
+ MethodName: "Sequence",
+ Handler: _KV_Sequence_Handler,
+ },
{
MethodName: "GetLatest",
Handler: _KV_GetLatest_Handler,
diff --git a/erigon-lib/gointerfaces/sentinelproto/sentinel.pb.go b/erigon-lib/gointerfaces/sentinelproto/sentinel.pb.go
index d84bc2620fa..52448fc1155 100644
--- a/erigon-lib/gointerfaces/sentinelproto/sentinel.pb.go
+++ b/erigon-lib/gointerfaces/sentinelproto/sentinel.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.36.4
+// protoc-gen-go v1.36.3
// protoc v5.29.3
// source: p2psentinel/sentinel.proto
@@ -12,7 +12,6 @@ import (
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
- unsafe "unsafe"
)
const (
@@ -804,7 +803,7 @@ func (x *RequestSubscribeExpiry) GetExpiryUnixSecs() uint64 {
var File_p2psentinel_sentinel_proto protoreflect.FileDescriptor
-var file_p2psentinel_sentinel_proto_rawDesc = string([]byte{
+var file_p2psentinel_sentinel_proto_rawDesc = []byte{
0x0a, 0x1a, 0x70, 0x32, 0x70, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2f, 0x73, 0x65,
0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x73, 0x65,
0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x1a, 0x11, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x79,
@@ -949,16 +948,16 @@ var file_p2psentinel_sentinel_proto_rawDesc = string([]byte{
0x73, 0x65, 0x42, 0x1a, 0x5a, 0x18, 0x2e, 0x2f, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c,
0x3b, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-})
+}
var (
file_p2psentinel_sentinel_proto_rawDescOnce sync.Once
- file_p2psentinel_sentinel_proto_rawDescData []byte
+ file_p2psentinel_sentinel_proto_rawDescData = file_p2psentinel_sentinel_proto_rawDesc
)
func file_p2psentinel_sentinel_proto_rawDescGZIP() []byte {
file_p2psentinel_sentinel_proto_rawDescOnce.Do(func() {
- file_p2psentinel_sentinel_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_p2psentinel_sentinel_proto_rawDesc), len(file_p2psentinel_sentinel_proto_rawDesc)))
+ file_p2psentinel_sentinel_proto_rawDescData = protoimpl.X.CompressGZIP(file_p2psentinel_sentinel_proto_rawDescData)
})
return file_p2psentinel_sentinel_proto_rawDescData
}
@@ -1030,7 +1029,7 @@ func file_p2psentinel_sentinel_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: unsafe.Slice(unsafe.StringData(file_p2psentinel_sentinel_proto_rawDesc), len(file_p2psentinel_sentinel_proto_rawDesc)),
+ RawDescriptor: file_p2psentinel_sentinel_proto_rawDesc,
NumEnums: 0,
NumMessages: 13,
NumExtensions: 0,
@@ -1041,6 +1040,7 @@ func file_p2psentinel_sentinel_proto_init() {
MessageInfos: file_p2psentinel_sentinel_proto_msgTypes,
}.Build()
File_p2psentinel_sentinel_proto = out.File
+ file_p2psentinel_sentinel_proto_rawDesc = nil
file_p2psentinel_sentinel_proto_goTypes = nil
file_p2psentinel_sentinel_proto_depIdxs = nil
}
diff --git a/erigon-lib/gointerfaces/sentryproto/sentry.pb.go b/erigon-lib/gointerfaces/sentryproto/sentry.pb.go
index abb6b6e9ecc..b3408a33d3e 100644
--- a/erigon-lib/gointerfaces/sentryproto/sentry.pb.go
+++ b/erigon-lib/gointerfaces/sentryproto/sentry.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.36.4
+// protoc-gen-go v1.36.3
// protoc v5.29.3
// source: p2psentry/sentry.proto
@@ -13,7 +13,6 @@ import (
emptypb "google.golang.org/protobuf/types/known/emptypb"
reflect "reflect"
sync "sync"
- unsafe "unsafe"
)
const (
@@ -1447,7 +1446,7 @@ func (x *AddPeerReply) GetSuccess() bool {
var File_p2psentry_sentry_proto protoreflect.FileDescriptor
-var file_p2psentry_sentry_proto_rawDesc = string([]byte{
+var file_p2psentry_sentry_proto_rawDesc = []byte{
0x0a, 0x16, 0x70, 0x32, 0x70, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2f, 0x73, 0x65, 0x6e, 0x74,
0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79,
0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
@@ -1695,16 +1694,16 @@ var file_p2psentry_sentry_proto_rawDesc = string([]byte{
0x79, 0x42, 0x16, 0x5a, 0x14, 0x2e, 0x2f, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x3b, 0x73, 0x65,
0x6e, 0x74, 0x72, 0x79, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
-})
+}
var (
file_p2psentry_sentry_proto_rawDescOnce sync.Once
- file_p2psentry_sentry_proto_rawDescData []byte
+ file_p2psentry_sentry_proto_rawDescData = file_p2psentry_sentry_proto_rawDesc
)
func file_p2psentry_sentry_proto_rawDescGZIP() []byte {
file_p2psentry_sentry_proto_rawDescOnce.Do(func() {
- file_p2psentry_sentry_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_p2psentry_sentry_proto_rawDesc), len(file_p2psentry_sentry_proto_rawDesc)))
+ file_p2psentry_sentry_proto_rawDescData = protoimpl.X.CompressGZIP(file_p2psentry_sentry_proto_rawDescData)
})
return file_p2psentry_sentry_proto_rawDescData
}
@@ -1817,7 +1816,7 @@ func file_p2psentry_sentry_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: unsafe.Slice(unsafe.StringData(file_p2psentry_sentry_proto_rawDesc), len(file_p2psentry_sentry_proto_rawDesc)),
+ RawDescriptor: file_p2psentry_sentry_proto_rawDesc,
NumEnums: 4,
NumMessages: 23,
NumExtensions: 0,
@@ -1829,6 +1828,7 @@ func file_p2psentry_sentry_proto_init() {
MessageInfos: file_p2psentry_sentry_proto_msgTypes,
}.Build()
File_p2psentry_sentry_proto = out.File
+ file_p2psentry_sentry_proto_rawDesc = nil
file_p2psentry_sentry_proto_goTypes = nil
file_p2psentry_sentry_proto_depIdxs = nil
}
diff --git a/erigon-lib/gointerfaces/txpoolproto/mining.pb.go b/erigon-lib/gointerfaces/txpoolproto/mining.pb.go
index 1d605360522..d5ec261bc70 100644
--- a/erigon-lib/gointerfaces/txpoolproto/mining.pb.go
+++ b/erigon-lib/gointerfaces/txpoolproto/mining.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.36.4
+// protoc-gen-go v1.36.3
// protoc v5.29.3
// source: txpool/mining.proto
@@ -13,7 +13,6 @@ import (
emptypb "google.golang.org/protobuf/types/known/emptypb"
reflect "reflect"
sync "sync"
- unsafe "unsafe"
)
const (
@@ -737,7 +736,7 @@ func (x *MiningReply) GetRunning() bool {
var File_txpool_mining_proto protoreflect.FileDescriptor
-var file_txpool_mining_proto_rawDesc = string([]byte{
+var file_txpool_mining_proto_rawDesc = []byte{
0x0a, 0x13, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2f, 0x6d, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x1a, 0x1b, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65,
@@ -832,16 +831,16 @@ var file_txpool_mining_proto_rawDesc = string([]byte{
0x6c, 0x79, 0x42, 0x16, 0x5a, 0x14, 0x2e, 0x2f, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x3b, 0x74,
0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x33,
-})
+}
var (
file_txpool_mining_proto_rawDescOnce sync.Once
- file_txpool_mining_proto_rawDescData []byte
+ file_txpool_mining_proto_rawDescData = file_txpool_mining_proto_rawDesc
)
func file_txpool_mining_proto_rawDescGZIP() []byte {
file_txpool_mining_proto_rawDescOnce.Do(func() {
- file_txpool_mining_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_txpool_mining_proto_rawDesc), len(file_txpool_mining_proto_rawDesc)))
+ file_txpool_mining_proto_rawDescData = protoimpl.X.CompressGZIP(file_txpool_mining_proto_rawDescData)
})
return file_txpool_mining_proto_rawDescData
}
@@ -902,7 +901,7 @@ func file_txpool_mining_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: unsafe.Slice(unsafe.StringData(file_txpool_mining_proto_rawDesc), len(file_txpool_mining_proto_rawDesc)),
+ RawDescriptor: file_txpool_mining_proto_rawDesc,
NumEnums: 0,
NumMessages: 16,
NumExtensions: 0,
@@ -913,6 +912,7 @@ func file_txpool_mining_proto_init() {
MessageInfos: file_txpool_mining_proto_msgTypes,
}.Build()
File_txpool_mining_proto = out.File
+ file_txpool_mining_proto_rawDesc = nil
file_txpool_mining_proto_goTypes = nil
file_txpool_mining_proto_depIdxs = nil
}
diff --git a/erigon-lib/gointerfaces/txpoolproto/txpool.pb.go b/erigon-lib/gointerfaces/txpoolproto/txpool.pb.go
index 2d07e97474e..9236c44025c 100644
--- a/erigon-lib/gointerfaces/txpoolproto/txpool.pb.go
+++ b/erigon-lib/gointerfaces/txpoolproto/txpool.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.36.4
+// protoc-gen-go v1.36.3
// protoc v5.29.3
// source: txpool/txpool.proto
@@ -13,7 +13,6 @@ import (
emptypb "google.golang.org/protobuf/types/known/emptypb"
reflect "reflect"
sync "sync"
- unsafe "unsafe"
)
const (
@@ -754,6 +753,146 @@ func (x *NonceReply) GetNonce() uint64 {
return 0
}
+type GetBlobsRequest struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ BlobHashes []*typesproto.H256 `protobuf:"bytes,1,rep,name=blob_hashes,json=blobHashes,proto3" json:"blob_hashes,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *GetBlobsRequest) Reset() {
+ *x = GetBlobsRequest{}
+ mi := &file_txpool_txpool_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetBlobsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetBlobsRequest) ProtoMessage() {}
+
+func (x *GetBlobsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_txpool_proto_msgTypes[14]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetBlobsRequest.ProtoReflect.Descriptor instead.
+func (*GetBlobsRequest) Descriptor() ([]byte, []int) {
+ return file_txpool_txpool_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *GetBlobsRequest) GetBlobHashes() []*typesproto.H256 {
+ if x != nil {
+ return x.BlobHashes
+ }
+ return nil
+}
+
+type BlobAndProofV1 struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Blob []byte `protobuf:"bytes,1,opt,name=blob,proto3" json:"blob,omitempty"`
+ Proof []byte `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *BlobAndProofV1) Reset() {
+ *x = BlobAndProofV1{}
+ mi := &file_txpool_txpool_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *BlobAndProofV1) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BlobAndProofV1) ProtoMessage() {}
+
+func (x *BlobAndProofV1) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_txpool_proto_msgTypes[15]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BlobAndProofV1.ProtoReflect.Descriptor instead.
+func (*BlobAndProofV1) Descriptor() ([]byte, []int) {
+ return file_txpool_txpool_proto_rawDescGZIP(), []int{15}
+}
+
+func (x *BlobAndProofV1) GetBlob() []byte {
+ if x != nil {
+ return x.Blob
+ }
+ return nil
+}
+
+func (x *BlobAndProofV1) GetProof() []byte {
+ if x != nil {
+ return x.Proof
+ }
+ return nil
+}
+
+type GetBlobsReply struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ BlobsAndProofs []*BlobAndProofV1 `protobuf:"bytes,1,rep,name=blobs_and_proofs,json=blobsAndProofs,proto3" json:"blobs_and_proofs,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *GetBlobsReply) Reset() {
+ *x = GetBlobsReply{}
+ mi := &file_txpool_txpool_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetBlobsReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetBlobsReply) ProtoMessage() {}
+
+func (x *GetBlobsReply) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_txpool_proto_msgTypes[16]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetBlobsReply.ProtoReflect.Descriptor instead.
+func (*GetBlobsReply) Descriptor() ([]byte, []int) {
+ return file_txpool_txpool_proto_rawDescGZIP(), []int{16}
+}
+
+func (x *GetBlobsReply) GetBlobsAndProofs() []*BlobAndProofV1 {
+ if x != nil {
+ return x.BlobsAndProofs
+ }
+ return nil
+}
+
type AllReply_Tx struct {
state protoimpl.MessageState `protogen:"open.v1"`
TxnType AllReply_TxnType `protobuf:"varint,1,opt,name=txn_type,json=txnType,proto3,enum=txpool.AllReply_TxnType" json:"txn_type,omitempty"`
@@ -765,7 +904,7 @@ type AllReply_Tx struct {
func (x *AllReply_Tx) Reset() {
*x = AllReply_Tx{}
- mi := &file_txpool_txpool_proto_msgTypes[14]
+ mi := &file_txpool_txpool_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -777,7 +916,7 @@ func (x *AllReply_Tx) String() string {
func (*AllReply_Tx) ProtoMessage() {}
func (x *AllReply_Tx) ProtoReflect() protoreflect.Message {
- mi := &file_txpool_txpool_proto_msgTypes[14]
+ mi := &file_txpool_txpool_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -825,7 +964,7 @@ type PendingReply_Tx struct {
func (x *PendingReply_Tx) Reset() {
*x = PendingReply_Tx{}
- mi := &file_txpool_txpool_proto_msgTypes[15]
+ mi := &file_txpool_txpool_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -837,7 +976,7 @@ func (x *PendingReply_Tx) String() string {
func (*PendingReply_Tx) ProtoMessage() {}
func (x *PendingReply_Tx) ProtoReflect() protoreflect.Message {
- mi := &file_txpool_txpool_proto_msgTypes[15]
+ mi := &file_txpool_txpool_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -876,7 +1015,7 @@ func (x *PendingReply_Tx) GetIsLocal() bool {
var File_txpool_txpool_proto protoreflect.FileDescriptor
-var file_txpool_txpool_proto_rawDesc = string([]byte{
+var file_txpool_txpool_proto_rawDesc = []byte{
0x0a, 0x13, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2f, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x1a, 0x1b, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65,
@@ -942,14 +1081,27 @@ var file_txpool_txpool_proto_rawDesc = string([]byte{
0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x38, 0x0a, 0x0a, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x52, 0x65,
0x70, 0x6c, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01,
0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e,
- 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x2a,
+ 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x22,
+ 0x3f, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x65,
+ 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e,
+ 0x48, 0x32, 0x35, 0x36, 0x52, 0x0a, 0x62, 0x6c, 0x6f, 0x62, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73,
+ 0x22, 0x3a, 0x0a, 0x0e, 0x42, 0x6c, 0x6f, 0x62, 0x41, 0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66,
+ 0x56, 0x31, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c,
+ 0x52, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0x51, 0x0a, 0x0d,
+ 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x40, 0x0a,
+ 0x10, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66,
+ 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c,
+ 0x2e, 0x42, 0x6c, 0x6f, 0x62, 0x41, 0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x56, 0x31, 0x52,
+ 0x0e, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x41, 0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x2a,
0x6c, 0x0a, 0x0c, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12,
0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e,
0x41, 0x4c, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x01,
0x12, 0x0f, 0x0a, 0x0b, 0x46, 0x45, 0x45, 0x5f, 0x54, 0x4f, 0x4f, 0x5f, 0x4c, 0x4f, 0x57, 0x10,
0x02, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x54, 0x41, 0x4c, 0x45, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07,
0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x04, 0x12, 0x12, 0x0a, 0x0e, 0x49, 0x4e, 0x54,
- 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x05, 0x32, 0xec, 0x03,
+ 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x05, 0x32, 0xa8, 0x04,
0x0a, 0x06, 0x54, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x12, 0x36, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73,
0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x74, 0x79,
@@ -980,25 +1132,29 @@ var file_txpool_txpool_proto_rawDesc = string([]byte{
0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x31, 0x0a, 0x05, 0x4e, 0x6f, 0x6e,
0x63, 0x65, 0x12, 0x14, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x4e, 0x6f, 0x6e, 0x63,
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f,
- 0x6c, 0x2e, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x42, 0x16, 0x5a, 0x14,
- 0x2e, 0x2f, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x3b, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-})
+ 0x6c, 0x2e, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3a, 0x0a, 0x08,
+ 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x12, 0x17, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f,
+ 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x15, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c,
+ 0x6f, 0x62, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x42, 0x16, 0x5a, 0x14, 0x2e, 0x2f, 0x74, 0x78,
+ 0x70, 0x6f, 0x6f, 0x6c, 0x3b, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
var (
file_txpool_txpool_proto_rawDescOnce sync.Once
- file_txpool_txpool_proto_rawDescData []byte
+ file_txpool_txpool_proto_rawDescData = file_txpool_txpool_proto_rawDesc
)
func file_txpool_txpool_proto_rawDescGZIP() []byte {
file_txpool_txpool_proto_rawDescOnce.Do(func() {
- file_txpool_txpool_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_txpool_txpool_proto_rawDesc), len(file_txpool_txpool_proto_rawDesc)))
+ file_txpool_txpool_proto_rawDescData = protoimpl.X.CompressGZIP(file_txpool_txpool_proto_rawDescData)
})
return file_txpool_txpool_proto_rawDescData
}
var file_txpool_txpool_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
-var file_txpool_txpool_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
+var file_txpool_txpool_proto_msgTypes = make([]protoimpl.MessageInfo, 19)
var file_txpool_txpool_proto_goTypes = []any{
(ImportResult)(0), // 0: txpool.ImportResult
(AllReply_TxnType)(0), // 1: txpool.AllReply.TxnType
@@ -1016,46 +1172,53 @@ var file_txpool_txpool_proto_goTypes = []any{
(*StatusReply)(nil), // 13: txpool.StatusReply
(*NonceRequest)(nil), // 14: txpool.NonceRequest
(*NonceReply)(nil), // 15: txpool.NonceReply
- (*AllReply_Tx)(nil), // 16: txpool.AllReply.Tx
- (*PendingReply_Tx)(nil), // 17: txpool.PendingReply.Tx
- (*typesproto.H256)(nil), // 18: types.H256
- (*typesproto.H160)(nil), // 19: types.H160
- (*emptypb.Empty)(nil), // 20: google.protobuf.Empty
- (*typesproto.VersionReply)(nil), // 21: types.VersionReply
+ (*GetBlobsRequest)(nil), // 16: txpool.GetBlobsRequest
+ (*BlobAndProofV1)(nil), // 17: txpool.BlobAndProofV1
+ (*GetBlobsReply)(nil), // 18: txpool.GetBlobsReply
+ (*AllReply_Tx)(nil), // 19: txpool.AllReply.Tx
+ (*PendingReply_Tx)(nil), // 20: txpool.PendingReply.Tx
+ (*typesproto.H256)(nil), // 21: types.H256
+ (*typesproto.H160)(nil), // 22: types.H160
+ (*emptypb.Empty)(nil), // 23: google.protobuf.Empty
+ (*typesproto.VersionReply)(nil), // 24: types.VersionReply
}
var file_txpool_txpool_proto_depIdxs = []int32{
- 18, // 0: txpool.TxHashes.hashes:type_name -> types.H256
+ 21, // 0: txpool.TxHashes.hashes:type_name -> types.H256
0, // 1: txpool.AddReply.imported:type_name -> txpool.ImportResult
- 18, // 2: txpool.TransactionsRequest.hashes:type_name -> types.H256
- 16, // 3: txpool.AllReply.txs:type_name -> txpool.AllReply.Tx
- 17, // 4: txpool.PendingReply.txs:type_name -> txpool.PendingReply.Tx
- 19, // 5: txpool.NonceRequest.address:type_name -> types.H160
- 1, // 6: txpool.AllReply.Tx.txn_type:type_name -> txpool.AllReply.TxnType
- 19, // 7: txpool.AllReply.Tx.sender:type_name -> types.H160
- 19, // 8: txpool.PendingReply.Tx.sender:type_name -> types.H160
- 20, // 9: txpool.Txpool.Version:input_type -> google.protobuf.Empty
- 2, // 10: txpool.Txpool.FindUnknown:input_type -> txpool.TxHashes
- 3, // 11: txpool.Txpool.Add:input_type -> txpool.AddRequest
- 5, // 12: txpool.Txpool.Transactions:input_type -> txpool.TransactionsRequest
- 9, // 13: txpool.Txpool.All:input_type -> txpool.AllRequest
- 20, // 14: txpool.Txpool.Pending:input_type -> google.protobuf.Empty
- 7, // 15: txpool.Txpool.OnAdd:input_type -> txpool.OnAddRequest
- 12, // 16: txpool.Txpool.Status:input_type -> txpool.StatusRequest
- 14, // 17: txpool.Txpool.Nonce:input_type -> txpool.NonceRequest
- 21, // 18: txpool.Txpool.Version:output_type -> types.VersionReply
- 2, // 19: txpool.Txpool.FindUnknown:output_type -> txpool.TxHashes
- 4, // 20: txpool.Txpool.Add:output_type -> txpool.AddReply
- 6, // 21: txpool.Txpool.Transactions:output_type -> txpool.TransactionsReply
- 10, // 22: txpool.Txpool.All:output_type -> txpool.AllReply
- 11, // 23: txpool.Txpool.Pending:output_type -> txpool.PendingReply
- 8, // 24: txpool.Txpool.OnAdd:output_type -> txpool.OnAddReply
- 13, // 25: txpool.Txpool.Status:output_type -> txpool.StatusReply
- 15, // 26: txpool.Txpool.Nonce:output_type -> txpool.NonceReply
- 18, // [18:27] is the sub-list for method output_type
- 9, // [9:18] is the sub-list for method input_type
- 9, // [9:9] is the sub-list for extension type_name
- 9, // [9:9] is the sub-list for extension extendee
- 0, // [0:9] is the sub-list for field type_name
+ 21, // 2: txpool.TransactionsRequest.hashes:type_name -> types.H256
+ 19, // 3: txpool.AllReply.txs:type_name -> txpool.AllReply.Tx
+ 20, // 4: txpool.PendingReply.txs:type_name -> txpool.PendingReply.Tx
+ 22, // 5: txpool.NonceRequest.address:type_name -> types.H160
+ 21, // 6: txpool.GetBlobsRequest.blob_hashes:type_name -> types.H256
+ 17, // 7: txpool.GetBlobsReply.blobs_and_proofs:type_name -> txpool.BlobAndProofV1
+ 1, // 8: txpool.AllReply.Tx.txn_type:type_name -> txpool.AllReply.TxnType
+ 22, // 9: txpool.AllReply.Tx.sender:type_name -> types.H160
+ 22, // 10: txpool.PendingReply.Tx.sender:type_name -> types.H160
+ 23, // 11: txpool.Txpool.Version:input_type -> google.protobuf.Empty
+ 2, // 12: txpool.Txpool.FindUnknown:input_type -> txpool.TxHashes
+ 3, // 13: txpool.Txpool.Add:input_type -> txpool.AddRequest
+ 5, // 14: txpool.Txpool.Transactions:input_type -> txpool.TransactionsRequest
+ 9, // 15: txpool.Txpool.All:input_type -> txpool.AllRequest
+ 23, // 16: txpool.Txpool.Pending:input_type -> google.protobuf.Empty
+ 7, // 17: txpool.Txpool.OnAdd:input_type -> txpool.OnAddRequest
+ 12, // 18: txpool.Txpool.Status:input_type -> txpool.StatusRequest
+ 14, // 19: txpool.Txpool.Nonce:input_type -> txpool.NonceRequest
+ 16, // 20: txpool.Txpool.GetBlobs:input_type -> txpool.GetBlobsRequest
+ 24, // 21: txpool.Txpool.Version:output_type -> types.VersionReply
+ 2, // 22: txpool.Txpool.FindUnknown:output_type -> txpool.TxHashes
+ 4, // 23: txpool.Txpool.Add:output_type -> txpool.AddReply
+ 6, // 24: txpool.Txpool.Transactions:output_type -> txpool.TransactionsReply
+ 10, // 25: txpool.Txpool.All:output_type -> txpool.AllReply
+ 11, // 26: txpool.Txpool.Pending:output_type -> txpool.PendingReply
+ 8, // 27: txpool.Txpool.OnAdd:output_type -> txpool.OnAddReply
+ 13, // 28: txpool.Txpool.Status:output_type -> txpool.StatusReply
+ 15, // 29: txpool.Txpool.Nonce:output_type -> txpool.NonceReply
+ 18, // 30: txpool.Txpool.GetBlobs:output_type -> txpool.GetBlobsReply
+ 21, // [21:31] is the sub-list for method output_type
+ 11, // [11:21] is the sub-list for method input_type
+ 11, // [11:11] is the sub-list for extension type_name
+ 11, // [11:11] is the sub-list for extension extendee
+ 0, // [0:11] is the sub-list for field type_name
}
func init() { file_txpool_txpool_proto_init() }
@@ -1067,9 +1230,9 @@ func file_txpool_txpool_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: unsafe.Slice(unsafe.StringData(file_txpool_txpool_proto_rawDesc), len(file_txpool_txpool_proto_rawDesc)),
+ RawDescriptor: file_txpool_txpool_proto_rawDesc,
NumEnums: 2,
- NumMessages: 16,
+ NumMessages: 19,
NumExtensions: 0,
NumServices: 1,
},
@@ -1079,6 +1242,7 @@ func file_txpool_txpool_proto_init() {
MessageInfos: file_txpool_txpool_proto_msgTypes,
}.Build()
File_txpool_txpool_proto = out.File
+ file_txpool_txpool_proto_rawDesc = nil
file_txpool_txpool_proto_goTypes = nil
file_txpool_txpool_proto_depIdxs = nil
}
diff --git a/erigon-lib/gointerfaces/txpoolproto/txpool_grpc.pb.go b/erigon-lib/gointerfaces/txpoolproto/txpool_grpc.pb.go
index dffb62a0f02..c7cce1ea95d 100644
--- a/erigon-lib/gointerfaces/txpoolproto/txpool_grpc.pb.go
+++ b/erigon-lib/gointerfaces/txpoolproto/txpool_grpc.pb.go
@@ -30,6 +30,7 @@ const (
Txpool_OnAdd_FullMethodName = "/txpool.Txpool/OnAdd"
Txpool_Status_FullMethodName = "/txpool.Txpool/Status"
Txpool_Nonce_FullMethodName = "/txpool.Txpool/Nonce"
+ Txpool_GetBlobs_FullMethodName = "/txpool.Txpool/GetBlobs"
)
// TxpoolClient is the client API for Txpool service.
@@ -55,6 +56,8 @@ type TxpoolClient interface {
Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusReply, error)
// returns nonce for given account
Nonce(ctx context.Context, in *NonceRequest, opts ...grpc.CallOption) (*NonceReply, error)
+ // returns the list of blobs and proofs for a given list of blob hashes
+ GetBlobs(ctx context.Context, in *GetBlobsRequest, opts ...grpc.CallOption) (*GetBlobsReply, error)
}
type txpoolClient struct {
@@ -164,6 +167,16 @@ func (c *txpoolClient) Nonce(ctx context.Context, in *NonceRequest, opts ...grpc
return out, nil
}
+func (c *txpoolClient) GetBlobs(ctx context.Context, in *GetBlobsRequest, opts ...grpc.CallOption) (*GetBlobsReply, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(GetBlobsReply)
+ err := c.cc.Invoke(ctx, Txpool_GetBlobs_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
// TxpoolServer is the server API for Txpool service.
// All implementations must embed UnimplementedTxpoolServer
// for forward compatibility.
@@ -187,6 +200,8 @@ type TxpoolServer interface {
Status(context.Context, *StatusRequest) (*StatusReply, error)
// returns nonce for given account
Nonce(context.Context, *NonceRequest) (*NonceReply, error)
+ // returns the list of blobs and proofs for a given list of blob hashes
+ GetBlobs(context.Context, *GetBlobsRequest) (*GetBlobsReply, error)
mustEmbedUnimplementedTxpoolServer()
}
@@ -224,6 +239,9 @@ func (UnimplementedTxpoolServer) Status(context.Context, *StatusRequest) (*Statu
func (UnimplementedTxpoolServer) Nonce(context.Context, *NonceRequest) (*NonceReply, error) {
return nil, status.Errorf(codes.Unimplemented, "method Nonce not implemented")
}
+func (UnimplementedTxpoolServer) GetBlobs(context.Context, *GetBlobsRequest) (*GetBlobsReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetBlobs not implemented")
+}
func (UnimplementedTxpoolServer) mustEmbedUnimplementedTxpoolServer() {}
func (UnimplementedTxpoolServer) testEmbeddedByValue() {}
@@ -400,6 +418,24 @@ func _Txpool_Nonce_Handler(srv interface{}, ctx context.Context, dec func(interf
return interceptor(ctx, in, info, handler)
}
+func _Txpool_GetBlobs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetBlobsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(TxpoolServer).GetBlobs(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Txpool_GetBlobs_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(TxpoolServer).GetBlobs(ctx, req.(*GetBlobsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
// Txpool_ServiceDesc is the grpc.ServiceDesc for Txpool service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@@ -439,6 +475,10 @@ var Txpool_ServiceDesc = grpc.ServiceDesc{
MethodName: "Nonce",
Handler: _Txpool_Nonce_Handler,
},
+ {
+ MethodName: "GetBlobs",
+ Handler: _Txpool_GetBlobs_Handler,
+ },
},
Streams: []grpc.StreamDesc{
{
diff --git a/erigon-lib/gointerfaces/typesproto/types.pb.go b/erigon-lib/gointerfaces/typesproto/types.pb.go
index c1739560530..8d04788e6b0 100644
--- a/erigon-lib/gointerfaces/typesproto/types.pb.go
+++ b/erigon-lib/gointerfaces/typesproto/types.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.36.4
+// protoc-gen-go v1.36.3
// protoc v5.29.3
// source: types/types.proto
@@ -12,7 +12,6 @@ import (
descriptorpb "google.golang.org/protobuf/types/descriptorpb"
reflect "reflect"
sync "sync"
- unsafe "unsafe"
)
const (
@@ -1103,7 +1102,7 @@ var (
var File_types_types_proto protoreflect.FileDescriptor
-var file_types_types_proto_rawDesc = string([]byte{
+var file_types_types_proto_rawDesc = []byte{
0x0a, 0x11, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x12, 0x05, 0x74, 0x79, 0x70, 0x65, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63,
@@ -1264,16 +1263,16 @@ var file_types_types_proto_rawDesc = string([]byte{
0x61, 0x74, 0x63, 0x68, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x14, 0x5a, 0x12, 0x2e,
0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-})
+}
var (
file_types_types_proto_rawDescOnce sync.Once
- file_types_types_proto_rawDescData []byte
+ file_types_types_proto_rawDescData = file_types_types_proto_rawDesc
)
func file_types_types_proto_rawDescGZIP() []byte {
file_types_types_proto_rawDescOnce.Do(func() {
- file_types_types_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_types_types_proto_rawDesc), len(file_types_types_proto_rawDesc)))
+ file_types_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_types_types_proto_rawDescData)
})
return file_types_types_proto_rawDescData
}
@@ -1339,7 +1338,7 @@ func file_types_types_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: unsafe.Slice(unsafe.StringData(file_types_types_proto_rawDesc), len(file_types_types_proto_rawDesc)),
+ RawDescriptor: file_types_types_proto_rawDesc,
NumEnums: 0,
NumMessages: 15,
NumExtensions: 3,
@@ -1351,6 +1350,7 @@ func file_types_types_proto_init() {
ExtensionInfos: file_types_types_proto_extTypes,
}.Build()
File_types_types_proto = out.File
+ file_types_types_proto_rawDesc = nil
file_types_types_proto_goTypes = nil
file_types_types_proto_depIdxs = nil
}
diff --git a/erigon-lib/kv/remotedb/kv_remote.go b/erigon-lib/kv/remotedb/kv_remote.go
index a0654912f7c..f96ef1d1080 100644
--- a/erigon-lib/kv/remotedb/kv_remote.go
+++ b/erigon-lib/kv/remotedb/kv_remote.go
@@ -236,8 +236,12 @@ func (tx *tx) CollectMetrics() {}
func (tx *tx) IncrementSequence(bucket string, amount uint64) (uint64, error) {
panic("not implemented yet")
}
-func (tx *tx) ReadSequence(bucket string) (uint64, error) {
- panic("not implemented yet")
+func (tx *tx) ReadSequence(table string) (uint64, error) {
+ reply, err := tx.db.remoteKV.Sequence(tx.ctx, &remote.SequenceReq{TxId: tx.id, Table: table})
+ if err != nil {
+ return 0, err
+ }
+ return reply.Value, nil
}
func (tx *tx) Append(bucket string, k, v []byte) error { panic("no write methods") }
func (tx *tx) AppendDup(bucket string, k, v []byte) error { panic("no write methods") }
diff --git a/erigon-lib/kv/remotedbserver/remotedbserver.go b/erigon-lib/kv/remotedbserver/remotedbserver.go
index 7316e92fbf4..fab71da7850 100644
--- a/erigon-lib/kv/remotedbserver/remotedbserver.go
+++ b/erigon-lib/kv/remotedbserver/remotedbserver.go
@@ -465,6 +465,21 @@ func (s *KvServer) Snapshots(_ context.Context, _ *remote.SnapshotsRequest) (rep
return reply, nil
}
+func (s *KvServer) Sequence(_ context.Context, req *remote.SequenceReq) (reply *remote.SequenceReply, err error) {
+ reply = &remote.SequenceReply{}
+ if err := s.with(req.TxId, func(tx kv.Tx) error {
+ ttx, ok := tx.(kv.TemporalTx)
+ if !ok {
+ return errors.New("server DB doesn't implement kv.Temporal interface")
+ }
+ reply.Value, err = ttx.ReadSequence(req.Table)
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ return reply, nil
+}
+
type StateChangePubSub struct {
chans map[uint]chan *remote.StateChangeBatch
id uint
diff --git a/erigon-lib/kv/tables.go b/erigon-lib/kv/tables.go
index 8f1a21a18e3..d94f7e7f540 100644
--- a/erigon-lib/kv/tables.go
+++ b/erigon-lib/kv/tables.go
@@ -509,7 +509,7 @@ type CmpFunc func(k1, k2, v1, v2 []byte) int
type TableCfg map[string]TableCfgItem
type Bucket string
-type DBI uint
+type DBI uint32
type TableFlags uint
const (
diff --git a/erigon-lib/log/v3/ext/id.go b/erigon-lib/log/v3/ext/id.go
deleted file mode 100644
index 0bfb1551f3a..00000000000
--- a/erigon-lib/log/v3/ext/id.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package ext
-
-import (
- "fmt"
- "math/rand"
- "sync"
- "time"
-)
-
-var r = rand.New(&lockedSource{src: rand.NewSource(time.Now().Unix())})
-
-// RandId creates a random identifier of the requested length.
-// Useful for assigning mostly-unique identifiers for logging
-// and identification that are unlikely to collide because of
-// short lifespan or low set cardinality
-func RandId(idlen int) string {
- b := make([]byte, idlen)
- var randVal uint32
- for i := 0; i < idlen; i++ {
- byteIdx := i % 4
- if byteIdx == 0 {
- randVal = r.Uint32()
- }
- b[i] = byte((randVal >> (8 * uint(byteIdx))) & 0xFF)
- }
- return fmt.Sprintf("%x", b)
-}
-
-// lockedSource is a wrapper to allow a rand.Source to be used
-// concurrently (same type as the one used internally in math/rand).
-type lockedSource struct {
- lk sync.Mutex
- src rand.Source
-}
-
-func (r *lockedSource) Int63() (n int64) {
- r.lk.Lock()
- n = r.src.Int63()
- r.lk.Unlock()
- return
-}
-
-func (r *lockedSource) Seed(seed int64) {
- r.lk.Lock()
- r.src.Seed(seed)
- r.lk.Unlock()
-}
diff --git a/erigon-lib/metrics/setup.go b/erigon-lib/metrics/setup.go
index d13f94ca03b..7d96343d0a2 100644
--- a/erigon-lib/metrics/setup.go
+++ b/erigon-lib/metrics/setup.go
@@ -19,11 +19,11 @@ package metrics
import (
"fmt"
"net/http"
+ "time"
+ "github.com/erigontech/erigon-lib/log/v3"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
-
- "github.com/erigontech/erigon-lib/log/v3"
)
var EnabledExpensive = false
@@ -37,8 +37,9 @@ func Setup(address string, logger log.Logger) *http.ServeMux {
prometheusMux.Handle("/debug/metrics/prometheus", promhttp.Handler())
promServer := &http.Server{
- Addr: address,
- Handler: prometheusMux,
+ Addr: address,
+ Handler: prometheusMux,
+ ReadHeaderTimeout: 5 * time.Second,
}
go func() {
diff --git a/erigon-lib/recsplit/index_reader.go b/erigon-lib/recsplit/index_reader.go
index bbf7ec7eb86..7a1776eca39 100644
--- a/erigon-lib/recsplit/index_reader.go
+++ b/erigon-lib/recsplit/index_reader.go
@@ -94,3 +94,4 @@ func (r *IndexReader) TwoLayerLookupByHash(hi, lo uint64) (uint64, bool) {
}
return r.index.OrdinalLookup(id), true
}
+func (r *IndexReader) BaseDataID() uint64 { return r.index.BaseDataID() }
diff --git a/erigon-lib/seg/compress.go b/erigon-lib/seg/compress.go
index 29f5b77f7c9..b1f12b6e540 100644
--- a/erigon-lib/seg/compress.go
+++ b/erigon-lib/seg/compress.go
@@ -34,6 +34,7 @@ import (
"time"
"github.com/c2h5oh/datasize"
+
"github.com/erigontech/erigon-lib/common"
dir2 "github.com/erigontech/erigon-lib/common/dir"
"github.com/erigontech/erigon-lib/etl"
@@ -82,8 +83,7 @@ var DefaultCfg = Cfg{
MaxDictPatterns: 64 * 1024,
DictReducerSoftLimit: 1_000_000,
-
- Workers: 1,
+ Workers: 1,
}
// Compressor is the main operating type for performing per-word compression
diff --git a/erigon-lib/seg/parallel_compress.go b/erigon-lib/seg/parallel_compress.go
index 6ad04a64327..022ed5b4810 100644
--- a/erigon-lib/seg/parallel_compress.go
+++ b/erigon-lib/seg/parallel_compress.go
@@ -782,8 +782,8 @@ func extractPatternsInSuperstrings(ctx context.Context, superstringCh chan []byt
} else {
inv = inv[:n]
}
- for i := 0; i < n; i++ {
- inv[filtered[i]] = int32(i)
+ for i := int32(0); i < int32(n); i++ {
+ inv[filtered[i]] = i
}
//logger.Info("Inverted array done")
var k int
diff --git a/erigon-lib/state/aggregator.go b/erigon-lib/state/aggregator.go
index bdc5d7ca1f7..f2b4762ea05 100644
--- a/erigon-lib/state/aggregator.go
+++ b/erigon-lib/state/aggregator.go
@@ -780,7 +780,7 @@ func (at *AggregatorRoTx) StepsInFiles(entitySet ...kv.Domain) uint64 {
if txNumInFiles > 0 {
txNumInFiles--
}
- return txNumInFiles / at.a.StepSize()
+ return txNumInFiles / at.StepSize()
}
func (at *AggregatorRoTx) TxNumsInFiles(entitySet ...kv.Domain) (minTxNum uint64) {
@@ -1054,7 +1054,7 @@ func (at *AggregatorRoTx) Prune(ctx context.Context, tx kv.RwTx, limit uint64, l
txTo := at.a.visibleFilesMinimaxTxNum.Load()
if txTo > 0 {
// txTo is first txNum in next step, has to go 1 tx behind to get correct step number
- step = (txTo - 1) / at.a.StepSize()
+ step = (txTo - 1) / at.StepSize()
}
if txFrom == txTo || !at.CanPrune(tx, txTo) {
@@ -1110,7 +1110,7 @@ func (at *AggregatorRoTx) LogStats(tx kv.Tx, tx2block func(endTxNumMinimax uint6
at.a.logger.Warn("[snapshots:history] Stat", "err", err)
return
}
- str = append(str, fmt.Sprintf("%d=%dK", item.endTxNum/at.a.StepSize(), bn/1_000))
+ str = append(str, fmt.Sprintf("%d=%dK", item.endTxNum/at.StepSize(), bn/1_000))
}
//str2 := make([]string, 0, len(at.storage.files))
//for _, item := range at.storage.files {
@@ -1129,7 +1129,7 @@ func (at *AggregatorRoTx) LogStats(tx kv.Tx, tx2block func(endTxNumMinimax uint6
return
}
}
- firstHistoryIndexBlockInDB, err := tx2block(at.d[kv.AccountsDomain].d.minStepInDB(tx) * at.a.StepSize())
+ firstHistoryIndexBlockInDB, err := tx2block(at.d[kv.AccountsDomain].d.minStepInDB(tx) * at.StepSize())
if err != nil {
at.a.logger.Warn("[snapshots:history] Stat", "err", err)
return
@@ -1284,8 +1284,8 @@ func (at *AggregatorRoTx) findMergeRange(maxEndTxNum, maxSpan uint64) *RangesV3
if !lmrCom.Equal(&lmrAcc) || !lmrCom.Equal(&lmrSto) {
// ensure that we do not make further merge progress until ranges are not equal
maxEndTxNum = min(maxEndTxNum, max(lmrAcc.to, lmrSto.to, lmrCom.to))
- at.a.logger.Warn("findMergeRange: hold further merge", "to", maxEndTxNum/at.a.StepSize(),
- "acc", lmrAcc.String("", at.a.StepSize()), "sto", lmrSto.String("", at.a.StepSize()), "com", lmrCom.String("", at.a.StepSize()))
+ at.a.logger.Warn("findMergeRange: hold further merge", "to", maxEndTxNum/at.StepSize(),
+ "acc", lmrAcc.String("", at.StepSize()), "sto", lmrSto.String("", at.StepSize()), "com", lmrCom.String("", at.StepSize()))
}
}
for id, d := range at.d {
@@ -1307,8 +1307,8 @@ func (at *AggregatorRoTx) findMergeRange(maxEndTxNum, maxSpan uint64) *RangesV3
// file for required range exists, hold this domain from merge but allow to merge comitemnt
r.domain[k].values = MergeRange{}
at.a.logger.Debug("findMergeRange: commitment range is different but file exists in domain, hold further merge",
- at.d[k].d.filenameBase, dr.values.String("vals", at.a.StepSize()),
- "commitment", cr.values.String("vals", at.a.StepSize()))
+ at.d[k].d.filenameBase, dr.values.String("vals", at.StepSize()),
+ "commitment", cr.values.String("vals", at.StepSize()))
continue
}
restorePrevRange = true
@@ -1318,7 +1318,7 @@ func (at *AggregatorRoTx) findMergeRange(maxEndTxNum, maxSpan uint64) *RangesV3
for k, dr := range &r.domain {
r.domain[k].values = MergeRange{}
at.a.logger.Debug("findMergeRange: commitment range is different than accounts or storage, cancel kv merge",
- at.d[k].d.filenameBase, dr.values.String("", at.a.StepSize()))
+ at.d[k].d.filenameBase, dr.values.String("", at.StepSize()))
}
}
}
diff --git a/erigon-lib/state/appendable.go b/erigon-lib/state/appendable.go
new file mode 100644
index 00000000000..3a249b2c8f8
--- /dev/null
+++ b/erigon-lib/state/appendable.go
@@ -0,0 +1,442 @@
+package state
+
+import (
+ "context"
+ "encoding/binary"
+ "errors"
+
+ "github.com/erigontech/erigon-lib/etl"
+ "github.com/erigontech/erigon-lib/kv"
+ "github.com/erigontech/erigon-lib/log/v3"
+ ae "github.com/erigontech/erigon-lib/state/appendable_extras"
+)
+
+const MaxUint64 = ^uint64(0)
+
+type RootRelationI interface {
+ RootNum2Num(from RootNum, tx kv.Tx) (Num, error)
+}
+
+type BufferFactory interface {
+ New() etl.Buffer
+}
+
+type markedStructure struct {
+ canonicalTbl string
+}
+
+var _ StartRoTx[EntityTxI] = (*Appendable[EntityTxI])(nil)
+var ErrNotFoundInSnapshot = errors.New("entity not found in snapshot")
+
+type Appendable[T EntityTxI] struct {
+ *ProtoAppendable
+
+ ms *markedStructure
+ valsTbl string
+
+ ts4Bytes bool
+ pruneFrom Num // should this be rootnum? Num is fine for now.
+ beginFilesRoGen func() T
+
+ rel RootRelationI
+}
+
+type AppOpts[T EntityTxI] func(a *Appendable[T])
+
+func App_WithFreezer[T EntityTxI](freezer Freezer) AppOpts[T] {
+ return func(a *Appendable[T]) {
+ a.freezer = freezer
+ }
+}
+
+func App_WithIndexBuilders[T EntityTxI](builders ...AccessorIndexBuilder) AppOpts[T] {
+ return func(a *Appendable[T]) {
+ a.builders = builders
+ }
+}
+
+func App_WithTs4Bytes[T EntityTxI](ts4Bytes bool) AppOpts[T] {
+ return func(a *Appendable[T]) {
+ a.ts4Bytes = ts4Bytes
+ }
+}
+
+func App_WithPruneFrom[T EntityTxI](pruneFrom Num) AppOpts[T] {
+ return func(a *Appendable[T]) {
+ a.pruneFrom = pruneFrom
+ }
+}
+
+// func App
+func NewMarkedAppendable(id AppendableId, valsTbl string, canonicalTbl string, relation RootRelationI, logger log.Logger, options ...AppOpts[MarkedTxI]) (*Appendable[MarkedTxI], error) {
+ a, err := create(id, Marked, valsTbl, canonicalTbl, relation, logger, options...)
+ if err != nil {
+ return nil, err
+ }
+
+ a.beginFilesRoGen = func() MarkedTxI {
+ return &MarkedTx{
+ ProtoAppendableTx: a.ProtoAppendable.BeginFilesRo(),
+ ap: a,
+ }
+ }
+
+ return a, nil
+}
+
+func NewUnmarkedAppendable(id AppendableId, valsTbl string, relation RootRelationI, logger log.Logger, options ...AppOpts[UnmarkedTxI]) (*Appendable[UnmarkedTxI], error) {
+ a, err := create(id, Unmarked, valsTbl, "", relation, logger, options...)
+ if err != nil {
+ return nil, err
+ }
+
+ // un-marked structure have default freezer and builders
+ if a.freezer == nil {
+ freezer := &SimpleRelationalFreezer{rel: relation, valsTbl: valsTbl}
+ a.freezer = freezer
+ }
+
+ if a.builders == nil {
+ // mapping num -> offset (ordinal map)
+ builder := NewSimpleAccessorBuilder(NewAccessorArgs(true, false), id, logger)
+ a.builders = []AccessorIndexBuilder{builder}
+ }
+
+ a.beginFilesRoGen = func() UnmarkedTxI {
+ return &UnmarkedTx{
+ ProtoAppendableTx: a.ProtoAppendable.BeginFilesRo(),
+ ap: a,
+ }
+ }
+
+ return a, nil
+}
+
+func NewAppendingAppendable(id AppendableId, valsTbl string, relation RootRelationI, logger log.Logger, options ...AppOpts[AppendingTxI]) (*Appendable[AppendingTxI], error) {
+ a, err := create(id, Appending, valsTbl, "", relation, logger, options...)
+ if err != nil {
+ return nil, err
+ }
+ a.beginFilesRoGen = func() AppendingTxI {
+ return &AppendingTx{
+ ProtoAppendableTx: a.ProtoAppendable.BeginFilesRo(),
+ ap: a,
+ }
+ }
+ return a, nil
+}
+
+func NewBufferedAppendable(id AppendableId, valsTbl string, relation RootRelationI, factory BufferFactory, logger log.Logger, options ...AppOpts[BufferedTxI]) (*Appendable[BufferedTxI], error) {
+ a, err := create(id, Buffered, valsTbl, "", relation, logger, options...)
+ if err != nil {
+ return nil, err
+ }
+
+ if factory == nil {
+ panic("no factory")
+ }
+
+ a.beginFilesRoGen = func() BufferedTxI {
+ return &BufferedTx{
+ ProtoAppendableTx: a.ProtoAppendable.BeginFilesRo(),
+ ap: a,
+ factory: factory,
+ }
+ }
+
+ // TODO: default builders and index builders
+ return a, nil
+}
+
+func create[T EntityTxI](id AppendableId, strategy CanonicityStrategy, valsTbl string, canonicalTbl string, relation RootRelationI, logger log.Logger, options ...AppOpts[T]) (*Appendable[T], error) {
+ a := &Appendable[T]{
+ ProtoAppendable: NewProto(id, nil, nil, logger),
+ }
+ a.rel = relation
+ a.valsTbl = valsTbl
+ if canonicalTbl != "" {
+ a.ms = &markedStructure{canonicalTbl: canonicalTbl}
+ }
+
+ for _, opt := range options {
+ opt(a)
+ }
+ a.strategy = strategy
+ return a, nil
+}
+
+func (a *Appendable[T]) PruneFrom() Num {
+ return a.pruneFrom
+}
+
+func (a *Appendable[T]) encTs(ts Num) []byte {
+ return ts.EncToBytes(!a.ts4Bytes)
+}
+
+func (a *Appendable[T]) BeginFilesRo() T {
+ return a.beginFilesRoGen()
+}
+
+// marked tx
+// MarkedTx operates on a "marked" appendable: a canonical table maps
+// Num -> hash and the vals table is keyed by the composite (Num, hash).
+type MarkedTx struct {
+ *ProtoAppendableTx
+ ap *Appendable[MarkedTxI]
+}
+
+// Get returns the canonical value for entityNum: snapshot files first,
+// falling back to the db (with canonical-hash resolution) when absent.
+// The bool reports whether the value came from a snapshot file.
+func (m *MarkedTx) Get(entityNum Num, tx kv.Tx) (value Bytes, foundInSnapshot bool, err error) {
+ data, found, err := m.LookupFile(entityNum, tx)
+ if err != nil {
+ return nil, false, err
+ }
+ if !found {
+ data, err := m.getDb(entityNum, nil, tx)
+ return data, false, err
+ }
+
+ switch m.ap.a.Name() {
+ case "headers":
+ // remove the first byte; it's first byte of header hash
+ // we should ultimately remove this first byte...as it's an old implementation of
+ // LessFalsePositives=True
+ // NOTE(review): this panics if a headers snapshot ever yields an empty value — confirm that can't happen
+ data = data[1:]
+ }
+ return data, true, nil
+}
+
+// getDb fetches the value from the db. If hash is nil the canonical hash for
+// entityNum is resolved first; otherwise the supplied (possibly
+// non-canonical) hash is used directly.
+func (m *MarkedTx) getDb(entityNum Num, hash []byte, tx kv.Tx) (Bytes, error) {
+ a := m.ap
+ if hash == nil {
+ // find canonical hash
+ canHash, err := tx.GetOne(a.ms.canonicalTbl, a.encTs(entityNum))
+ if err != nil {
+ return nil, err
+ }
+ hash = canHash
+ }
+ return tx.GetOne(a.valsTbl, m.combK(entityNum, hash))
+}
+
+// GetNc returns a possibly non-canonical value for (num, hash); db only.
+func (m *MarkedTx) GetNc(num Num, hash []byte, tx kv.Tx) (Bytes, error) {
+ return m.getDb(num, hash, tx)
+}
+
+// Put stores the canonical marker (num -> hash) and then the value under the
+// composite (num, hash) key.
+func (m *MarkedTx) Put(num Num, hash []byte, val Bytes, tx kv.RwTx) error {
+ // canonical marker first, then the value
+ a := m.ap
+ if err := tx.Append(a.ms.canonicalTbl, a.encTs(num), hash); err != nil {
+ return err
+ }
+
+ key := m.combK(num, hash)
+ return tx.Put(a.valsTbl, key, val)
+}
+
+// Unwind removes canonical markers from RootNum2Num(from) onwards. Values in
+// the vals table are left in place (they simply become non-canonical).
+func (m *MarkedTx) Unwind(ctx context.Context, from RootNum, tx kv.RwTx) error {
+ a := m.ap
+ efrom, err := a.rel.RootNum2Num(from, tx)
+ if err != nil {
+ return err
+ }
+ fromKey := a.encTs(efrom)
+ _, err = ae.DeleteRangeFromTbl(a.ms.canonicalTbl, fromKey, nil, MaxUint64, tx)
+ return err
+}
+
+// Prune deletes entries in [pruneFrom, RootNum2Num(to)) from both the
+// canonical and the vals tables, removing at most `limit` entries per table
+// per call. Returns the total number of deleted entries.
+func (m *MarkedTx) Prune(ctx context.Context, to RootNum, limit uint64, tx kv.RwTx) (pruneCount uint64, err error) {
+ a := m.ap
+ fromKeyPrefix := a.encTs(a.pruneFrom)
+ eto, err := a.rel.RootNum2Num(to, tx)
+ if err != nil {
+ return 0, err
+ }
+ toKeyPrefix := a.encTs(eto)
+ // the vals-table keys are (ts, hash); prefix comparison against the bare
+ // ts bounds still yields the right half-open range.
+ delCanonical, err := ae.DeleteRangeFromTbl(a.ms.canonicalTbl, fromKeyPrefix, toKeyPrefix, limit, tx)
+ if err != nil {
+ return delCanonical, err
+ }
+ // the original discarded delCanonical on success, under-reporting pruneCount
+ delVals, err := ae.DeleteRangeFromTbl(a.valsTbl, fromKeyPrefix, toKeyPrefix, limit, tx)
+ return delCanonical + delVals, err
+}
+
+// combK builds the composite vals-table key: 8-byte big-endian ts followed by
+// the hash (assumed to be a 32-byte common.Hash; shorter input is zero-padded).
+func (m *MarkedTx) combK(ts Num, hash []byte) []byte {
+ // relevant only for marked appendable
+ const hashLen = 32
+ key := make([]byte, 8+hashLen)
+ binary.BigEndian.PutUint64(key[:8], uint64(ts))
+ copy(key[8:], hash)
+ return key
+}
+
+// unmarked tx
+// UnmarkedTx operates on an appendable storing only canonical values, keyed
+// directly by Num; no canonical table exists.
+type UnmarkedTx struct {
+ *ProtoAppendableTx
+ ap *Appendable[UnmarkedTxI]
+}
+
+// Get returns the value for entityNum: snapshot files first, then the vals
+// table. The bool reports whether the value came from a snapshot file.
+// (The original misnamed this return "pruneCount" — a copy-paste slip.)
+func (m *UnmarkedTx) Get(entityNum Num, tx kv.Tx) (value Bytes, foundInSnapshot bool, err error) {
+ ap := m.ap
+ data, found, err := m.LookupFile(entityNum, tx)
+ if err != nil {
+ return nil, false, err
+ }
+ if !found {
+ data, err := tx.GetOne(ap.valsTbl, ap.encTs(entityNum))
+ return data, false, err
+ }
+
+ return data, true, nil
+}
+
+// Append appends the canonical value for entityNum to the vals table.
+func (m *UnmarkedTx) Append(entityNum Num, value Bytes, tx kv.RwTx) error {
+ return tx.Append(m.ap.valsTbl, m.ap.encTs(entityNum), value)
+}
+
+// Unwind removes all db values with num >= RootNum2Num(from).
+func (m *UnmarkedTx) Unwind(ctx context.Context, from RootNum, tx kv.RwTx) error {
+ ap := m.ap
+ fromId, err := ap.rel.RootNum2Num(from, tx)
+ if err != nil {
+ return err
+ }
+ // limit must be MaxUint64: DeleteRangeFromTbl deletes nothing when
+ // limit==0, so the original unwound nothing here (cf. MarkedTx.Unwind).
+ _, err = ae.DeleteRangeFromTbl(ap.valsTbl, ap.encTs(fromId), nil, MaxUint64, tx)
+ return err
+}
+
+// Prune deletes values in [pruneFrom, RootNum2Num(to)) from the vals table,
+// at most `limit` entries per call.
+func (m *UnmarkedTx) Prune(ctx context.Context, to RootNum, limit uint64, tx kv.RwTx) (pruneCount uint64, err error) {
+ ap := m.ap
+ toId, err := ap.rel.RootNum2Num(to, tx)
+ if err != nil {
+ return 0, err
+ }
+ log.Info("pruning", "appendable", ap.a.Name(), "from", ap.pruneFrom, "to", toId)
+
+ eFrom := ap.encTs(ap.pruneFrom)
+ eTo := ap.encTs(toId)
+ return ae.DeleteRangeFromTbl(ap.valsTbl, eFrom, eTo, limit, tx)
+}
+
+// AppendingTx operates on an "appending" appendable: db values are keyed by
+// (possibly non-canonical) Id, snapshots hold canonical Num-ordered values.
+type AppendingTx struct {
+ *ProtoAppendableTx
+ ap *Appendable[AppendingTxI]
+}
+
+// Get operates on snapshots only, it doesn't do resolution of
+// Num -> Id needed for finding canonical values in db.
+// Returns ErrNotFoundInSnapshot when entityNum is not in the visible files.
+func (m *AppendingTx) Get(entityNum Num, tx kv.Tx) (value Bytes, foundInSnapshot bool, err error) {
+ // snapshots only
+ data, found, err := m.LookupFile(entityNum, tx)
+ if err != nil {
+ return nil, false, err
+ }
+ if !found {
+ return nil, false, ErrNotFoundInSnapshot
+ }
+ return data, true, nil
+}
+
+// GetNc returns the (possibly non-canonical) db value stored under entityId.
+func (m *AppendingTx) GetNc(entityId Id, tx kv.Tx) (Bytes, error) {
+ return tx.GetOne(m.ap.valsTbl, m.ap.encTs(Num(entityId)))
+}
+
+// Append appends a value keyed by entityId to the vals table.
+func (m *AppendingTx) Append(entityId Id, value Bytes, tx kv.RwTx) error {
+ return tx.Append(m.ap.valsTbl, m.ap.encTs(Num(entityId)), value)
+}
+
+// IncrementSequence reserves `amount` ids, returning the first reserved one.
+func (m *AppendingTx) IncrementSequence(amount uint64, tx kv.RwTx) (baseId uint64, err error) {
+ return tx.IncrementSequence(m.ap.valsTbl, amount)
+}
+
+// ReadSequence returns the current sequence value without advancing it.
+func (m *AppendingTx) ReadSequence(tx kv.Tx) (uint64, error) {
+ return tx.ReadSequence(m.ap.valsTbl)
+}
+
+// ResetSequence forces the sequence to `value` (used after unwinds).
+func (m *AppendingTx) ResetSequence(value uint64, tx kv.RwTx) error {
+ return tx.ResetSequence(m.ap.valsTbl, value)
+}
+
+// Unwind removes all db values with id >= RootNum2Num(from).
+func (m *AppendingTx) Unwind(ctx context.Context, from RootNum, tx kv.RwTx) error {
+ ap := m.ap
+ fromId, err := ap.rel.RootNum2Num(from, tx)
+ if err != nil {
+ return err
+ }
+ // limit must be MaxUint64: DeleteRangeFromTbl deletes nothing when
+ // limit==0, so the original unwound nothing here (cf. MarkedTx.Unwind).
+ _, err = ae.DeleteRangeFromTbl(ap.valsTbl, ap.encTs(fromId), nil, MaxUint64, tx)
+ return err
+}
+
+// Prune deletes values in [pruneFrom, RootNum2Num(to)) from the vals table,
+// at most `limit` entries per call.
+func (m *AppendingTx) Prune(ctx context.Context, to RootNum, limit uint64, tx kv.RwTx) (pruneCount uint64, err error) {
+ ap := m.ap
+ toId, err := ap.rel.RootNum2Num(to, tx)
+ if err != nil {
+ return 0, err
+ }
+ log.Info("pruning", "appendable", ap.a.Name(), "from", ap.pruneFrom, "to", toId)
+
+ eFrom := ap.encTs(ap.pruneFrom)
+ eTo := ap.encTs(toId)
+ return ae.DeleteRangeFromTbl(ap.valsTbl, eFrom, eTo, limit, tx)
+}
+
+// BufferedTx buffers Put calls in an etl.Collector; nothing reaches the db
+// until Flush. Intended to hold only canonical values.
+type BufferedTx struct {
+ *ProtoAppendableTx
+ ap *Appendable[BufferedTxI]
+ values *etl.Collector // lazily created on first Put
+ factory BufferFactory
+}
+
+// Get doesn't reflect the values currently in Buffer
+// (db lookup only; snapshot files are not consulted here).
+func (m *BufferedTx) Get(entityNum Num, tx kv.Tx) (data Bytes, foundInSnapshot bool, err error) {
+ data, err = tx.GetOne(m.ap.valsTbl, m.ap.encTs(entityNum))
+ return data, false, err
+}
+
+// Put records (entityNum, value) in the buffer, creating the collector on
+// first use via the configured factory.
+func (m *BufferedTx) Put(entityNum Num, value Bytes) error {
+ if m.values == nil {
+ m.values = etl.NewCollector(m.id.Name()+".appendable.flush",
+ m.id.Dirs().Tmp, m.factory.New(), m.a.logger).LogLvl(log.LvlTrace)
+ }
+
+ key := m.ap.encTs(entityNum)
+ return m.values.Collect(key, value)
+}
+
+// Flush loads the buffered values into the vals table. No-op if nothing was
+// buffered.
+func (m *BufferedTx) Flush(ctx context.Context, tx kv.RwTx) error {
+ if m.values == nil {
+ return nil
+ }
+ // load uses Append since identityLoadFunc is used.
+ // might want to configure other TransformArgs here?
+ return m.values.Load(tx, m.ap.valsTbl, etl.IdentityLoadFunc, etl.TransformArgs{Quit: ctx.Done()})
+}
+
+// Prune deletes values in [pruneFrom, RootNum2Num(to)) from the vals table,
+// at most `limit` entries per call.
+func (m *BufferedTx) Prune(ctx context.Context, to RootNum, limit uint64, tx kv.RwTx) (pruneCount uint64, err error) {
+ ap := m.ap
+ toId, err := ap.rel.RootNum2Num(to, tx)
+ if err != nil {
+ return 0, err
+ }
+ log.Info("pruning", "appendable", ap.a.Name(), "from", ap.pruneFrom, "to", toId)
+
+ eFrom := ap.encTs(ap.pruneFrom)
+ eTo := ap.encTs(toId)
+ return ae.DeleteRangeFromTbl(ap.valsTbl, eFrom, eTo, limit, tx)
+}
+
+// Unwind is deliberately a no-op: buffered appendables are expected to store
+// only canonical values.
+func (m *BufferedTx) Unwind(ctx context.Context, from RootNum, tx kv.RwTx) error {
+ // no op
+ return nil
+}
+
+// Close releases the etl buffer (if any) and the underlying files view.
+func (m *BufferedTx) Close() {
+ if m.values != nil {
+ m.values.Close()
+ }
+
+ m.ProtoAppendableTx.Close()
+}
+
+// compile-time interface conformance checks
+var (
+ _ MarkedTxI = (*MarkedTx)(nil)
+ _ UnmarkedTxI = (*UnmarkedTx)(nil)
+ _ AppendingTxI = (*AppendingTx)(nil)
+ _ BufferedTxI = (*BufferedTx)(nil)
+)
diff --git a/erigon-lib/state/appendable_extras/registry.go b/erigon-lib/state/appendable_extras/registry.go
new file mode 100644
index 00000000000..aa6c8e78cdd
--- /dev/null
+++ b/erigon-lib/state/appendable_extras/registry.go
@@ -0,0 +1,220 @@
+package entity_extras
+
+import (
+ "crypto/rand"
+ "encoding/binary"
+ "os"
+ "path"
+ "sync"
+
+ "github.com/erigontech/erigon-lib/chain/snapcfg"
+ "github.com/erigontech/erigon-lib/common/datadir"
+ "github.com/erigontech/erigon-lib/common/dir"
+)
+
+// AppendableId id as a uint64, returned by `RegisterAppendable`. It is dependent on
+// the order of registration, and so counting on it being constant across reboots
+// might be tricky.
+type AppendableId uint16
+
+// holder carries everything registered for one appendable entity.
+type holder struct {
+ name string
+ snapshotNameBase string // name to be used in snapshot file
+ indexNameBases []string // one indexNameBase for each index
+ dirs datadir.Dirs
+ snapshotDir string
+ saltFile string
+ snapshotCreationConfig *SnapshotConfig
+}
+
+// keeping this fixed size, so that append() does not potentially re-allocate array
+// to a different address. This means that the "reads" (methods on AppendableId) can
+// be done without any locks.
+var entityRegistry [20]holder
+var curr uint16 // next free slot in entityRegistry; guarded by mu
+
+var mu sync.RWMutex
+
+// RegisterAppendable
+// name: just user-defined name for identification
+// dirs: directory where snapshots have to reside
+// salt: for creation of indexes.
+// pre: preverified files are snapshot file lists that gets downloaded initially.
+// RegisterAppendable
+// name: just user-defined name for identification
+// dirs: directory where snapshots have to reside
+// salt: for creation of indexes.
+// pre: preverified files are snapshot file lists that gets downloaded initially.
+// Panics if the (fixed-size) registry is full or no SnapshotConfig was given.
+func RegisterAppendable(name string, dirs datadir.Dirs, pre snapcfg.Preverified, options ...EntityIdOption) AppendableId {
+ h := &holder{
+ name: name,
+ dirs: dirs,
+ }
+ for _, opt := range options {
+ opt(h)
+ }
+
+ // fill in defaults for everything the options didn't set
+ if h.snapshotNameBase == "" {
+ h.snapshotNameBase = name
+ }
+
+ if h.indexNameBases == nil {
+ // default
+ h.indexNameBases = []string{name}
+ }
+
+ if h.snapshotDir == "" {
+ h.snapshotDir = dirs.Snap
+ }
+
+ if h.saltFile == "" {
+ h.saltFile = path.Join(dirs.Snap, "salt-blocks.txt")
+ }
+
+ if h.snapshotCreationConfig == nil {
+ panic("snapshotCreationConfig is required")
+ }
+
+ mu.Lock()
+ defer mu.Unlock()
+
+ // explicit guard: the original relied on an opaque index-out-of-range panic
+ if int(curr) >= len(entityRegistry) {
+ panic("appendable registry full; increase entityRegistry size")
+ }
+
+ entityRegistry[curr] = *h
+ id := AppendableId(curr)
+ // the registry copy shares the *SnapshotConfig pointer, so configuring via h is fine
+ h.snapshotCreationConfig.SetupConfig(id, h.snapshotDir, pre)
+ curr++
+
+ return id
+}
+
+// Cleanup resets the registry cursor so ids can be re-registered.
+// Only for tests; stale holder data is left in place and overwritten later.
+func Cleanup() {
+ // only for tests
+ mu.Lock()
+ curr = 0
+ mu.Unlock()
+}
+
+// EntityIdOption customizes a holder during RegisterAppendable.
+type EntityIdOption func(*holder)
+
+// WithSnapshotPrefix overrides the base name used in snapshot file names.
+func WithSnapshotPrefix(prefix string) EntityIdOption {
+ return func(a *holder) {
+ a.snapshotNameBase = prefix
+ }
+}
+
+// WithIndexFileType sets the base names of the accessor index files.
+func WithIndexFileType(indexFileType []string) EntityIdOption {
+ return func(a *holder) {
+ a.indexNameBases = indexFileType
+ }
+}
+
+// WithSnapshotCreationConfig sets the (required) snapshot creation config.
+func WithSnapshotCreationConfig(cfg *SnapshotConfig) EntityIdOption {
+ return func(a *holder) {
+ a.snapshotCreationConfig = cfg
+ }
+}
+
+// WithSaltFile overrides the default salt file location.
+func WithSaltFile(saltFile string) EntityIdOption {
+ return func(a *holder) {
+ a.saltFile = saltFile
+ }
+}
+
+// WithSnapshotDir overrides the default snapshot directory.
+func WithSnapshotDir(dir string) EntityIdOption {
+ return func(a *holder) {
+ a.snapshotDir = dir
+ }
+}
+
+// Id returns the numeric registry index.
+func (a AppendableId) Id() uint64 {
+ return uint64(a)
+}
+
+// Name returns the user-defined entity name.
+func (a AppendableId) Name() string {
+ return entityRegistry[a].name
+}
+
+// SnapshotPrefix returns the base name used in snapshot file names.
+func (a AppendableId) SnapshotPrefix() string {
+ return entityRegistry[a].snapshotNameBase
+}
+
+// IndexPrefix returns the base names of the accessor index files.
+func (a AppendableId) IndexPrefix() []string {
+ return entityRegistry[a].indexNameBases
+}
+
+// String implements fmt.Stringer (same as Name).
+func (a AppendableId) String() string {
+ return entityRegistry[a].name
+}
+
+// Dirs returns the datadir layout this entity was registered with.
+func (a AppendableId) Dirs() datadir.Dirs {
+ return entityRegistry[a].dirs
+}
+
+// SnapshotDir returns the directory holding this entity's snapshot files.
+func (a AppendableId) SnapshotDir() string {
+ return entityRegistry[a].snapshotDir
+}
+
+// SnapshotConfig returns the snapshot creation configuration.
+func (a AppendableId) SnapshotConfig() *SnapshotConfig {
+ return entityRegistry[a].snapshotCreationConfig
+}
+
+// Salt returns the 32-bit index salt for this entity, reading (and creating,
+// if needed) the salt file on first use. Results are cached per directory.
+func (a AppendableId) Salt() (uint32, error) {
+ // not computing salt at EntityId inception
+ // since salt file might not be downloaded yet.
+ saltFile := entityRegistry[a].saltFile
+ baseDir := path.Dir(saltFile)
+ saltLock.RLock()
+ salt, ok := saltMap[baseDir]
+ saltLock.RUnlock()
+ if ok {
+ return salt, nil
+ }
+
+ saltLock.Lock()
+ // defer: the original leaked the write lock on the error return below
+ defer saltLock.Unlock()
+ // re-check: another goroutine may have filled the cache while we waited
+ if salt, ok := saltMap[baseDir]; ok {
+ return salt, nil
+ }
+ salt, err := readAndCreateSaltIfNeeded(saltFile)
+ if err != nil {
+ return 0, err
+ }
+
+ saltMap[baseDir] = salt
+ return salt, nil
+}
+
+// saltMap caches salts per snapshot directory; guarded by saltLock.
+var saltMap = map[string]uint32{}
+var saltLock sync.RWMutex
+
+// readAndCreateSaltIfNeeded reads the 4-byte big-endian salt from saltFile,
+// generating a fresh random salt if the file is missing or malformed.
+func readAndCreateSaltIfNeeded(saltFile string) (uint32, error) {
+ exists, err := dir.FileExist(saltFile)
+ if err != nil {
+ return 0, err
+ }
+
+ if !exists {
+ if err := writeNewSalt(saltFile); err != nil {
+ return 0, err
+ }
+ }
+ saltBytes, err := os.ReadFile(saltFile)
+ if err != nil {
+ return 0, err
+ }
+ if len(saltBytes) != 4 {
+ // malformed file: regenerate, then RE-READ so the returned value matches
+ // what is on disk. The original decoded the stale bytes instead, which
+ // panicked whenever the file held fewer than 4 bytes.
+ if err := writeNewSalt(saltFile); err != nil {
+ return 0, err
+ }
+ saltBytes, err = os.ReadFile(saltFile)
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ return binary.BigEndian.Uint32(saltBytes), nil
+}
+
+// writeNewSalt creates the parent directory if needed and fsyncs 4 random
+// bytes into saltFile.
+func writeNewSalt(saltFile string) error {
+ dir.MustExist(path.Dir(saltFile))
+
+ saltBytes := make([]byte, 4)
+ if _, err := rand.Read(saltBytes); err != nil {
+ return err
+ }
+ return dir.WriteFileWithFsync(saltFile, saltBytes, os.ModePerm)
+}
diff --git a/erigon-lib/state/appendable_extras/snapshot_info.go b/erigon-lib/state/appendable_extras/snapshot_info.go
new file mode 100644
index 00000000000..5005f4adaae
--- /dev/null
+++ b/erigon-lib/state/appendable_extras/snapshot_info.go
@@ -0,0 +1,257 @@
+package entity_extras
+
+import (
+ "fmt"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/erigontech/erigon-lib/chain/snapcfg"
+ "github.com/erigontech/erigon-lib/downloader/snaptype"
+)
+
+//const EntitiesPerStep = uint64(1000)
+
+// SnapshotCreationConfig describes how snapshot files for one entity are cut
+// and merged. All sizes below are in number of entities.
+type SnapshotCreationConfig struct {
+ // number of entities per step
+ // should be same for all entities in an entity set
+ EntitiesPerStep uint64
+
+ // all the following configs are in terms of number of entities
+ // 1 step has `EntitiesPerStep` entities
+
+ // how many (root) entities to leave in db (and not consider for freezing) this is needed
+ // since blockchains reorg and so we don't freeze latest entities.
+ SafetyMargin uint64
+
+ // progressively merge smaller files into large ones.
+ // maximum size (merge limit) is the last element of MergeStages
+ // increasing order expected, each step is a multiple of the previous one
+ // e.g. [1000, 20000, 600000] --> first stage creates files of size 1000; then 20 of these merged to
+ // create size 10000; then 30 of these merged to create size 100000
+ // each must be divisible by `EntitiesPerStep`
+ MergeStages []uint64
+
+ // minimum snapshot size
+ // must be divisible by `EntitiesPerStep`
+ MinimumSize uint64
+
+ // SeedableSize uint64 // TODO: minimum size of file for it to be seedable.
+
+ // preverified can have larger files than that indicated by `MergeSteps.last`.
+ // This is because previously, different values might have been used.
+ //Preverified snapcfg.Preverified
+ preverifiedParsed []*FileInfo // filled by SetupConfig
+}
+
+// SnapshotConfig couples the creation config with alignment information.
+type SnapshotConfig struct {
+ *SnapshotCreationConfig
+
+ // alignment means that the read-only snapshot view of this entity
+ // is aligned to those of the root entity.
+ RootAligned bool
+}
+
+// StepsInFrozenFile returns the number of steps in a fully merged file.
+func (s *SnapshotConfig) StepsInFrozenFile() uint64 {
+ return s.MergeStages[len(s.MergeStages)-1] / s.EntitiesPerStep
+}
+
+// SetupConfig parses the preverified file list for this id and validates the
+// merge-stage configuration. Idempotent: calls after the first are no-ops.
+func (s *SnapshotConfig) SetupConfig(id AppendableId, snapshotDir string, pre snapcfg.Preverified) {
+ if s.preverifiedParsed != nil {
+ return
+ }
+ s.preverifiedParsed = make([]*FileInfo, 0, len(pre))
+ for _, item := range []snapcfg.PreverifiedItem(pre) {
+ res, ok := ParseFileName(id, item.Name)
+ if !ok {
+ continue
+ }
+ s.preverifiedParsed = append(s.preverifiedParsed, res)
+ }
+
+ // some validation
+ // StepsInFrozenFile/GetFreezingRange index the last stage, so it must exist
+ if len(s.MergeStages) == 0 {
+ panic("at least one MergeStage is required")
+ }
+ for i := range s.MergeStages {
+ if s.MergeStages[i]%s.EntitiesPerStep != 0 {
+ panic(fmt.Sprintf("MergeStages[%d] must be divisible by EntitiesPerStep", i))
+ }
+ }
+ if s.MinimumSize%s.EntitiesPerStep != 0 {
+ // plain panic: the original wrapped a constant string in fmt.Sprintf
+ // with no verbs (vet/S1039)
+ panic("MinimumSize must be divisible by EntitiesPerStep")
+ }
+}
+
+// parse snapshot file info
+// FileInfo describes one parsed snapshot-related file name.
+type FileInfo struct {
+ Version snaptype.Version
+ From, To uint64 // entity numbers (not steps)
+ Name string // filename
+ Path string // full path
+ Ext string // extension (".seg" or ".idx")
+ Id AppendableId
+}
+
+// IsIndex reports whether this is an accessor index file.
+// (plain == instead of strings.Compare: Compare is meant for three-way ordering)
+func (f *FileInfo) IsIndex() bool { return f.Ext == ".idx" }
+
+// IsSeg reports whether this is a snapshot segment file.
+func (f *FileInfo) IsSeg() bool { return f.Ext == ".seg" }
+
+// Len returns the number of entities the file covers.
+func (f *FileInfo) Len() uint64 { return f.To - f.From }
+
+// Dir returns the directory containing the file.
+func (f *FileInfo) Dir() string { return filepath.Dir(f.Path) }
+
+// TODO: snaptype.Version should be replaced??
+
+// fileName formats the canonical snapshot base name, e.g. "v1-000000-000500-headers".
+func fileName(baseName string, version snaptype.Version, from, to uint64) string {
+ // from, to are in units of steps and not in number of entities
+ return fmt.Sprintf("v%d-%06d-%06d-%s", version, from, to, baseName)
+}
+
+// SnapFilePath returns the full path of the .seg file for [from, to).
+func SnapFilePath(id AppendableId, version snaptype.Version, from, to RootNum) string {
+ return filepath.Join(id.SnapshotDir(), fileName(id.Name(), version, from.Step(id), to.Step(id))+".seg")
+}
+
+// IdxFilePath returns the full path of the idxNum-th .idx file for [from, to).
+func IdxFilePath(id AppendableId, version snaptype.Version, from, to RootNum, idxNum uint64) string {
+ return filepath.Join(id.SnapshotDir(), fileName(id.IndexPrefix()[idxNum], version, from.Step(id), to.Step(id))+".idx")
+}
+
+// ParseFileName parses fileName assuming it lives in id's snapshot directory.
+func ParseFileName(id AppendableId, fileName string) (res *FileInfo, ok bool) {
+ return ParseFileNameInDir(id, id.SnapshotDir(), fileName)
+}
+
+// ParseFileNameInDir parses names like
+//
+//	'v1-000000-000500-transactions.seg'
+//	'v1-017000-017500-transactions-to-block.idx'
+//
+// converting the step range to entity numbers. ok is false when the name is
+// not a .seg/.idx file or does not belong to this AppendableId.
+func ParseFileNameInDir(id AppendableId, dir, fileName string) (res *FileInfo, ok bool) {
+ ext := filepath.Ext(fileName)
+ if ext != ".seg" && ext != ".idx" {
+ return nil, false
+ }
+ onlyName := fileName[:len(fileName)-len(ext)]
+ parts := strings.SplitN(onlyName, "-", 4)
+ if len(parts) < 4 {
+ // the original wrote `return nil, ok`, silently relying on ok's zero value
+ return nil, false
+ }
+ res = &FileInfo{Path: filepath.Join(dir, fileName), Name: fileName, Ext: ext}
+
+ var err error
+ res.Version, err = snaptype.ParseVersion(parts[0])
+ if err != nil {
+ return res, false
+ }
+
+ from, err := strconv.ParseUint(parts[1], 10, 64)
+ if err != nil {
+ return res, false
+ }
+ eps := id.SnapshotConfig().EntitiesPerStep
+ res.From = from * eps
+ to, err := strconv.ParseUint(parts[2], 10, 64)
+ if err != nil {
+ return res, false
+ }
+ res.To = to * eps
+
+ res.Id = id
+ name := parts[3]
+ // it should either match the snapshot prefix or one of the index prefixes
+ if name == id.SnapshotPrefix() {
+ return res, true
+ }
+ for _, prefix := range id.IndexPrefix() {
+ if name == prefix {
+ return res, true
+ }
+ }
+
+ return nil, false
+}
+
+// determine freezing ranges, given snapshot creation config
+func GetFreezingRange(rootFrom, rootTo RootNum, id AppendableId) (freezeFrom RootNum, freezeTo RootNum, canFreeze bool) {
+ /**
+ 1. `from`, `to` must be round off to minimum size (atleast)
+ 2. mergeLimit is a function: (from, preverified files, mergeLimit default) -> biggest file size starting `from`
+ 3. if mergeLimit size is not possible, then `freezeTo` should be next largest possible file size
+ as allowed by the MergeSteps or MinimumSize.
+ **/
+
+ if rootFrom >= rootTo {
+ return rootFrom, rootTo, false
+ }
+
+ cfg := id.SnapshotConfig()
+ from := uint64(rootFrom)
+ to := uint64(rootTo)
+
+ // guard: with fewer than SafetyMargin entities the original subtraction
+ // wrapped around uint64 and produced a huge bogus range
+ if to <= cfg.SafetyMargin {
+ return rootFrom, rootTo, false
+ }
+ to = to - cfg.SafetyMargin
+ from = (from / cfg.MinimumSize) * cfg.MinimumSize
+ to = (to / cfg.MinimumSize) * cfg.MinimumSize
+ if to <= from {
+ // nothing freezable after rounding; also guards `jump` underflow below
+ return rootFrom, rootTo, false
+ }
+
+ mergeLimit := getMergeLimit(id, from)
+ maxJump := cfg.EntitiesPerStep
+
+ // the largest file allowed to start at `from` is bounded by its alignment
+ if from%mergeLimit == 0 {
+ maxJump = mergeLimit
+ } else {
+ for i := len(cfg.MergeStages) - 1; i >= 0; i-- {
+ if from%cfg.MergeStages[i] == 0 {
+ maxJump = cfg.MergeStages[i]
+ break
+ }
+ }
+ }
+
+ _freezeFrom := from
+ var _freezeTo uint64
+ jump := to - from
+
+ switch {
+ case jump >= maxJump:
+ // enough data, max jump
+ _freezeTo = _freezeFrom + maxJump
+ case jump >= cfg.MergeStages[0]:
+ // else find if a merge step can be used
+ // assuming merge step multiple of each other
+ for i := len(cfg.MergeStages) - 1; i >= 0; i-- {
+ if jump >= cfg.MergeStages[i] {
+ _freezeTo = _freezeFrom + cfg.MergeStages[i]
+ break
+ }
+ }
+ case jump >= cfg.MinimumSize:
+ // else use minimum size
+ _freezeTo = _freezeFrom + cfg.MinimumSize
+
+ default:
+ _freezeTo = _freezeFrom
+ }
+
+ return RootNum(_freezeFrom), RootNum(_freezeTo), _freezeTo-_freezeFrom >= cfg.MinimumSize
+}
+
+// getMergeLimit returns the largest allowed file size (in entities) starting
+// at `from`: normally the last MergeStages entry, but a preverified file
+// covering `from` can raise it (older configs may have cut larger files).
+func getMergeLimit(id AppendableId, from uint64) uint64 {
+ //return 0
+ cfg := id.SnapshotConfig()
+ maxMergeLimit := cfg.MergeStages[len(cfg.MergeStages)-1]
+
+ for _, info := range cfg.preverifiedParsed {
+ if !info.IsSeg() {
+ continue
+ }
+
+ // only a file whose range contains `from` matters
+ if from < info.From || from >= info.To {
+ continue
+ }
+
+ if info.Len() >= maxMergeLimit {
+ // info.Len() > maxMergeLimit --> this happens when previously a larger value
+ // was used, and now the configured merge limit is smaller.
+ return info.Len()
+ }
+
+ break
+ }
+
+ return maxMergeLimit
+}
diff --git a/erigon-lib/state/appendable_extras/types.go b/erigon-lib/state/appendable_extras/types.go
new file mode 100644
index 00000000000..b1ae8f887e5
--- /dev/null
+++ b/erigon-lib/state/appendable_extras/types.go
@@ -0,0 +1,38 @@
+package entity_extras
+
+/** custom types **/
+
+// canonical sequence number of entity (in context)
+type Num uint64
+
+// sequence number of entity - might contain non-canonical values
+type Id uint64
+
+// canonical sequence number of the root entity (or secondary key)
+type RootNum uint64
+
+// Bytes is a convenience alias for raw values.
+type Bytes = []byte
+
+// Step returns the snapshot step this Num falls into for the given id.
+func (n Num) Step(a AppendableId) uint64 {
+ return step(n, a)
+}
+
+// Step returns the snapshot step this RootNum falls into for the given id.
+func (n RootNum) Step(a AppendableId) uint64 {
+ return step(n, a)
+}
+
+// EncToBytes encodes big-endian: 8 bytes when x8Bytes, else 4 bytes.
+func (x Id) EncToBytes(x8Bytes bool) (out []byte) {
+ return EncToBytes(x, x8Bytes)
+}
+
+// EncTo8Bytes encodes as an 8-byte big-endian key.
+func (x Id) EncTo8Bytes() (out []byte) {
+ return EncToBytes(x, true)
+}
+
+// EncToBytes encodes big-endian: 8 bytes when x8Bytes, else 4 bytes.
+func (x Num) EncToBytes(x8Bytes bool) (out []byte) {
+ return EncToBytes(x, x8Bytes)
+}
+
+// EncTo8Bytes encodes as an 8-byte big-endian key.
+func (x Num) EncTo8Bytes() (out []byte) {
+ return EncToBytes(x, true)
+}
+
+// EncTo8Bytes encodes as an 8-byte big-endian key.
+func (x RootNum) EncTo8Bytes() (out []byte) {
+ return EncToBytes(x, true)
+}
diff --git a/erigon-lib/state/appendable_extras/utils.go b/erigon-lib/state/appendable_extras/utils.go
new file mode 100644
index 00000000000..d722026a37f
--- /dev/null
+++ b/erigon-lib/state/appendable_extras/utils.go
@@ -0,0 +1,65 @@
+package entity_extras
+
+import (
+ "bytes"
+ "encoding/binary"
+
+ "github.com/erigontech/erigon-lib/kv"
+)
+
+// step converts an entity number to its snapshot step for the given id.
+func step[T ~uint64](n T, a AppendableId) uint64 {
+ return uint64(n) / a.SnapshotConfig().EntitiesPerStep
+}
+
+// EncToBytes encodes x big-endian into 8 bytes (x8Bytes=true) or 4 bytes
+// (x8Bytes=false; the value is truncated to uint32).
+func EncToBytes[T ~uint64](x T, x8Bytes bool) (out []byte) {
+ if x8Bytes {
+ return binary.BigEndian.AppendUint64(nil, uint64(x))
+ }
+ return binary.BigEndian.AppendUint32(nil, uint32(x))
+}
+
+// Decode64FromBytes decodes a big-endian value produced by EncToBytes,
+// reading 8 or 4 bytes from the front of buf.
+func Decode64FromBytes(buf []byte, x8Bytes bool) (x uint64) {
+ if !x8Bytes {
+ return uint64(binary.BigEndian.Uint32(buf))
+ }
+ return binary.BigEndian.Uint64(buf)
+}
+
+// toPrefix exclusive
+// DeleteRangeFromTbl deletes keys in [fromPrefix, toPrefix) from tbl, at most
+// `limit` entries. toPrefix==nil means "until the end of the table".
+// NOTE: limit==0 deletes nothing.
+func DeleteRangeFromTbl(tbl string, fromPrefix, toPrefix []byte, limit uint64, rwTx kv.RwTx) (delCount uint64, err error) {
+ c, err := rwTx.RwCursor(tbl) // TODO: no dupsort tbl assumed
+ if err != nil {
+ return
+ }
+
+ defer c.Close()
+ // bigendianess assumed (for key comparison)
+ // imo this can be generalized if needed, by using key comparison functions, which mdbx provides.
+ k, _, err := c.Seek(fromPrefix)
+ if err != nil {
+ // the original dropped Seek/Next errors whenever k came back nil
+ return delCount, err
+ }
+ for k != nil && limit > 0 && (toPrefix == nil || bytes.Compare(k, toPrefix) < 0) {
+ if err := c.DeleteCurrent(); err != nil {
+ return delCount, err
+ }
+ limit--
+ delCount++
+ if k, _, err = c.Next(); err != nil {
+ return delCount, err
+ }
+ }
+
+ return
+}
+
+// IdentityRootRelation is the trivial RootNum -> Num mapping (1:1), for
+// entities whose numbering coincides with the root entity's.
+type IdentityRootRelation struct{}
+
+// RootNum2Num returns rootNum unchanged.
+func (i *IdentityRootRelation) RootNum2Num(rootNum RootNum, tx kv.Tx) (num Num, err error) {
+ return Num(rootNum), nil
+}
+
+// IdentityRootRelationInstance is a shared, stateless instance.
+var IdentityRootRelationInstance = &IdentityRootRelation{}
diff --git a/erigon-lib/state/appendable_interfaces.go b/erigon-lib/state/appendable_interfaces.go
new file mode 100644
index 00000000000..e90081c01c3
--- /dev/null
+++ b/erigon-lib/state/appendable_interfaces.go
@@ -0,0 +1,106 @@
+package state
+
+import (
+ "context"
+
+ "github.com/erigontech/erigon-lib/common/background"
+ "github.com/erigontech/erigon-lib/kv"
+ "github.com/erigontech/erigon-lib/recsplit"
+ ae "github.com/erigontech/erigon-lib/state/appendable_extras"
+)
+
+// aliases re-exported from appendable_extras for convenience
+type RootNum = ae.RootNum
+type Num = ae.Num
+type Id = ae.Id
+type AppendableId = ae.AppendableId
+type Bytes = ae.Bytes
+
+// Freezer takes hot data (e.g. from db) and transforms it
+// to snapshot cold data.
+// pattern is SetCollector ; and then call Freeze
+type Freezer interface {
+ // baseNumFrom/To represent num which the snapshot should range
+ // this doesn't check if the snapshot can be created or not. It's the responsibility
+ // of the caller to ensure this.
+ Freeze(ctx context.Context, from, to RootNum, db kv.RoDB) error
+ SetCollector(coll Collector)
+}
+
+// Collector receives each serialized value destined for a snapshot file.
+type Collector func(values []byte) error
+
+/** index building **/
+
+// AccessorIndexBuilder builds a recsplit accessor index over a [from, to) range.
+type AccessorIndexBuilder interface {
+ Build(ctx context.Context, from, to RootNum, p *background.Progress) (*recsplit.Index, error)
+ AllowsOrdinalLookupByNum() bool
+}
+
+// StartRoTx opens a read-only files view for the concrete tx type T.
+type StartRoTx[T EntityTxI] interface {
+ BeginFilesRo() T
+}
+
+// EntityTxI is the common read/prune/unwind surface shared by all strategies.
+type EntityTxI interface {
+ // value, value from snapshot?, error
+ Get(entityNum Num, tx kv.Tx) (Bytes, bool, error)
+ Prune(ctx context.Context, to RootNum, limit uint64, tx kv.RwTx) (uint64, error)
+ Unwind(ctx context.Context, from RootNum, tx kv.RwTx) error
+ Close()
+ Type() CanonicityStrategy
+
+ VisibleFilesMaxRootNum() RootNum
+ VisibleFilesMaxNum() Num
+}
+
+// MarkedTxI adds hash-addressed access for marked (canonical-table) entities.
+type MarkedTxI interface {
+ EntityTxI
+ GetNc(num Num, hash []byte, tx kv.Tx) ([]byte, error)
+ Put(num Num, hash []byte, value Bytes, tx kv.RwTx) error
+}
+
+// UnmarkedTxI adds plain append for entities storing only canonical values.
+type UnmarkedTxI interface {
+ EntityTxI
+ Append(entityNum Num, value Bytes, tx kv.RwTx) error
+}
+
+// AppendingTxI adds Id-keyed db access plus sequence management.
+type AppendingTxI interface {
+ EntityTxI
+ // db only
+ GetNc(entityId Id, tx kv.Tx) (Bytes, error)
+ Append(entityId Id, value Bytes, tx kv.RwTx) error
+
+ // sequence apis
+ IncrementSequence(amount uint64, tx kv.RwTx) (uint64, error)
+ ReadSequence(tx kv.Tx) (uint64, error)
+ ResetSequence(value uint64, tx kv.RwTx) error
+}
+
+/*
+buffer values before writing to db supposed to store only canonical values
+Note that values in buffer are not reflected in Get call.
+*/
+type BufferedTxI interface {
+ EntityTxI
+ Put(Num, Bytes) error
+ Flush(context.Context, kv.RwTx) error
+}
+
+// CanonicityStrategy selects how canonical values are tracked in the db.
+type CanonicityStrategy uint8
+
+const (
+ // canonicalTbl & valsTbl
+ Marked CanonicityStrategy = iota
+
+ /*
+ valsTbl; storing only canonical values
+ unwinds are rare or values arrive far apart
+ and so unwind doesn't need to be very performant.
+ */
+ Unmarked
+ /*
+ valsTbl;
+ unwinds are frequent and values arrive at high cadence
+ so need to have very performant unwinds.
+ */
+ Appending
+ Buffered
+)
diff --git a/erigon-lib/state/domain_shared.go b/erigon-lib/state/domain_shared.go
index 62a22e3fa95..811b7476753 100644
--- a/erigon-lib/state/domain_shared.go
+++ b/erigon-lib/state/domain_shared.go
@@ -475,7 +475,7 @@ func (sd *SharedDomains) replaceShortenedKeysInBranch(prefix []byte, branch comm
len(branch) == 0 ||
sd.aggTx.TxNumsInFiles(kv.StateDomains...) == 0 ||
bytes.Equal(prefix, keyCommitmentState) ||
- ((fEndTxNum-fStartTxNum)/sd.aggTx.a.StepSize())%2 != 0 { // this checks if file has even number of steps, singular files does not transform values.
+ ((fEndTxNum-fStartTxNum)/sd.aggTx.StepSize())%2 != 0 { // this checks if file has even number of steps, singular files does not transform values.
return branch, nil // do not transform, return as is
}
@@ -517,7 +517,7 @@ func (sd *SharedDomains) replaceShortenedKeysInBranch(prefix []byte, branch comm
// Optimised key referencing a state file record (file number and offset within the file)
storagePlainKey, found := sto.lookupByShortenedKey(key, storageGetter)
if !found {
- s0, s1 := fStartTxNum/sd.aggTx.a.StepSize(), fEndTxNum/sd.aggTx.a.StepSize()
+ s0, s1 := fStartTxNum/sd.aggTx.StepSize(), fEndTxNum/sd.aggTx.StepSize()
sd.logger.Crit("replace back lost storage full key", "shortened", fmt.Sprintf("%x", key),
"decoded", fmt.Sprintf("step %d-%d; offt %d", s0, s1, decodeShorterKey(key)))
return nil, fmt.Errorf("replace back lost storage full key: %x", key)
diff --git a/erigon-lib/state/files_item.go b/erigon-lib/state/files_item.go
index 0c81513781a..ddfc9cfc066 100644
--- a/erigon-lib/state/files_item.go
+++ b/erigon-lib/state/files_item.go
@@ -72,9 +72,13 @@ type FilesItem interface {
var _ FilesItem = (*filesItem)(nil)
func newFilesItem(startTxNum, endTxNum, stepSize uint64) *filesItem {
+ return newFilesItemWithFrozenSteps(startTxNum, endTxNum, stepSize, config3.StepsInFrozenFile)
+}
+
+func newFilesItemWithFrozenSteps(startTxNum, endTxNum, stepSize uint64, stepsInFrozenFile uint64) *filesItem {
startStep := startTxNum / stepSize
endStep := endTxNum / stepSize
- frozen := endStep-startStep == config3.StepsInFrozenFile
+ frozen := endStep-startStep >= stepsInFrozenFile
return &filesItem{startTxNum: startTxNum, endTxNum: endTxNum, frozen: frozen}
}
diff --git a/erigon-lib/state/proto_appendable.go b/erigon-lib/state/proto_appendable.go
new file mode 100644
index 00000000000..e3745d0982d
--- /dev/null
+++ b/erigon-lib/state/proto_appendable.go
@@ -0,0 +1,285 @@
+package state
+
+import (
+ "context"
+ "fmt"
+ "sort"
+
+ "github.com/erigontech/erigon-lib/common/background"
+ "github.com/erigontech/erigon-lib/downloader/snaptype"
+ "github.com/erigontech/erigon-lib/kv"
+ "github.com/erigontech/erigon-lib/log/v3"
+ "github.com/erigontech/erigon-lib/recsplit"
+ "github.com/erigontech/erigon-lib/seg"
+ ae "github.com/erigontech/erigon-lib/state/appendable_extras"
+
+ btree2 "github.com/tidwall/btree"
+)
+
+/*
+ProtoAppendable provides basic shared functionality; it is not intended to be used directly.
+Can be embedded in other marker/relational/appendable entities.
+*/
+type ProtoAppendable struct {
+ freezer Freezer
+
+ a ae.AppendableId
+ builders []AccessorIndexBuilder
+ dirtyFiles *btree2.BTreeG[*filesItem]
+ _visible visibleFiles
+
+ strategy CanonicityStrategy
+
+ logger log.Logger
+}
+
+func NewProto(a ae.AppendableId, builders []AccessorIndexBuilder, freezer Freezer, logger log.Logger) *ProtoAppendable {
+ return &ProtoAppendable{
+ a: a,
+ builders: builders,
+ freezer: freezer,
+ dirtyFiles: btree2.NewBTreeGOptions(filesItemLess, btree2.Options{Degree: 128, NoLocks: false}),
+ logger: logger,
+ }
+}
+
+// func (a *ProtoEntity) DirtyFilesMaxRootNum() ae.RootNum {
+// latest, found := a.dirtyFiles.Max()
+// if latest == nil || !found {
+// return 0
+// }
+// return ae.RootNum(latest.endTxNum)
+// }
+
+func (a *ProtoAppendable) RecalcVisibleFiles(toRootNum RootNum) {
+ a._visible = calcVisibleFiles(a.dirtyFiles, AccessorHashMap, false, uint64(toRootNum))
+}
+
+func (a *ProtoAppendable) IntegrateDirtyFiles(files []*filesItem) {
+ for _, item := range files {
+ a.dirtyFiles.Set(item)
+ }
+}
+
+func (a *ProtoAppendable) BuildFiles(ctx context.Context, from, to RootNum, db kv.RoDB, ps *background.ProgressSet) (dirtyFiles []*filesItem, err error) {
+	a.logger.Debug(fmt.Sprintf("freezing %s from %d to %d", a.a.Name(), from, to))
+ calcFrom, calcTo := from, to
+ var canFreeze bool
+ cfg := a.a.SnapshotConfig()
+ for {
+ calcFrom, calcTo, canFreeze = ae.GetFreezingRange(calcFrom, calcTo, a.a)
+ if !canFreeze {
+ break
+ }
+
+		a.logger.Debug(fmt.Sprintf("freezing %s from %d to %d", a.a.Name(), calcFrom, calcTo))
+ path := ae.SnapFilePath(a.a, snaptype.Version(1), calcFrom, calcTo)
+ sn, err := seg.NewCompressor(ctx, "Snapshot "+a.a.Name(), path, a.a.Dirs().Tmp, seg.DefaultCfg, log.LvlTrace, a.logger)
+ if err != nil {
+ return dirtyFiles, err
+ }
+ defer sn.Close()
+
+ {
+ a.freezer.SetCollector(func(values []byte) error {
+ // TODO: look at block_Snapshots.go#dumpRange
+				// when snapshot is a non-frozen range, it uses AddUncompressedWord (fast creation)
+ // else AddWord.
+ // But BuildFiles perhaps only used for fast builds...and merge is for slow builds.
+ return sn.AddUncompressedWord(values)
+ })
+ if err = a.freezer.Freeze(ctx, calcFrom, calcTo, db); err != nil {
+ return dirtyFiles, err
+ }
+ }
+
+ {
+ p := ps.AddNew(path, 1)
+ defer ps.Delete(p)
+
+ if err := sn.Compress(); err != nil {
+ return dirtyFiles, err
+ }
+ sn.Close()
+ sn = nil
+ ps.Delete(p)
+
+ }
+
+ valuesDecomp, err := seg.NewDecompressor(path)
+ if err != nil {
+ return dirtyFiles, err
+ }
+
+ df := newFilesItemWithFrozenSteps(uint64(calcFrom), uint64(calcTo), cfg.MinimumSize, cfg.StepsInFrozenFile())
+ df.decompressor = valuesDecomp
+
+ indexes := make([]*recsplit.Index, len(a.builders))
+ for i, ib := range a.builders {
+ p := &background.Progress{}
+ ps.Add(p)
+ recsplitIdx, err := ib.Build(ctx, calcFrom, calcTo, p)
+ if err != nil {
+ return dirtyFiles, err
+ }
+
+ indexes[i] = recsplitIdx
+ }
+ // TODO: add support for multiple indexes in filesItem.
+ df.index = indexes[0]
+ dirtyFiles = append(dirtyFiles, df)
+
+ calcFrom = calcTo
+ calcTo = to
+ }
+
+ return dirtyFiles, nil
+}
+
+func (a *ProtoAppendable) Close() {
+ var toClose []*filesItem
+ a.dirtyFiles.Walk(func(items []*filesItem) bool {
+ toClose = append(toClose, items...)
+ return true
+ })
+ for _, item := range toClose {
+ item.closeFiles()
+ a.dirtyFiles.Delete(item)
+ }
+}
+
+// proto_appendable_rotx
+
+type ProtoAppendableTx struct {
+ id AppendableId
+ files visibleFiles
+ a *ProtoAppendable
+
+ readers []*recsplit.IndexReader
+}
+
+func (a *ProtoAppendable) BeginFilesRo() *ProtoAppendableTx {
+ for i := range a._visible {
+ if a._visible[i].src.frozen {
+ a._visible[i].src.refcount.Add(1)
+ }
+ }
+
+ return &ProtoAppendableTx{
+ id: a.a,
+ files: a._visible,
+ a: a,
+ }
+}
+
+func (a *ProtoAppendableTx) Close() {
+ if a.files == nil {
+ return
+ }
+ files := a.files
+ a.files = nil
+ for i := range files {
+ src := files[i].src
+ if src == nil || src.frozen {
+ continue
+ }
+ refCnt := src.refcount.Add(-1)
+ if refCnt == 0 && src.canDelete.Load() {
+ src.closeFilesAndRemove()
+ }
+ }
+
+ for i := range a.readers {
+ a.readers[i].Close()
+ }
+ a.readers = nil
+}
+
+func (a *ProtoAppendableTx) StatelessIdxReader(i int) *recsplit.IndexReader {
+ if a.readers == nil {
+ a.readers = make([]*recsplit.IndexReader, len(a.files))
+ }
+
+ r := a.readers[i]
+ if r == nil {
+ r = a.files[i].src.index.GetReaderFromPool()
+ a.readers[i] = r
+ }
+
+ return r
+}
+
+func (a *ProtoAppendableTx) Type() CanonicityStrategy {
+ return a.a.strategy
+}
+
+func (a *ProtoAppendableTx) Garbage(merged *filesItem) (outs []*filesItem) {
+ if merged == nil {
+ return
+ }
+
+ a.a.dirtyFiles.Walk(func(item []*filesItem) bool {
+ for _, item := range item {
+ if item.frozen {
+ continue
+ }
+ if item.isSubsetOf(merged) {
+ outs = append(outs, item)
+ }
+ if item.isBefore(merged) && hasCoverVisibleFile(a.files, item) {
+ outs = append(outs, item)
+ }
+ }
+ return true
+ })
+ return outs
+}
+
+func (a *ProtoAppendableTx) VisibleFilesMaxRootNum() RootNum {
+ lasti := len(a.files) - 1
+ if lasti < 0 {
+ return 0
+ }
+
+ return RootNum(a.files[lasti].src.endTxNum)
+}
+
+func (a *ProtoAppendableTx) VisibleFilesMaxNum() Num {
+ lasti := len(a.files) - 1
+ if lasti < 0 {
+ return 0
+ }
+ idx := a.files[lasti].src.index
+ return Num(idx.BaseDataID() + idx.KeyCount())
+}
+
+func (a *ProtoAppendableTx) LookupFile(entityNum Num, tx kv.Tx) (b Bytes, found bool, err error) {
+ ap := a.a
+ lastNum := a.VisibleFilesMaxNum()
+ if entityNum < lastNum && ap.builders[0].AllowsOrdinalLookupByNum() {
+ var word []byte
+ index := sort.Search(len(ap._visible), func(i int) bool {
+ idx := ap._visible[i].src.index
+ return idx.BaseDataID()+idx.KeyCount() > uint64(entityNum)
+ })
+		if index == len(ap._visible) { // sort.Search returns len(slice) when no element matches, never -1
+ return nil, false, fmt.Errorf("entity get error: snapshot expected but now found: (%s, %d)", ap.a.Name(), entityNum)
+ }
+ indexR := a.StatelessIdxReader(index)
+ id := int64(entityNum) - int64(indexR.BaseDataID())
+ if id < 0 {
+ a.a.logger.Error("ordinal lookup by negative num", "entityNum", entityNum, "index", index, "indexR.BaseDataID()", indexR.BaseDataID())
+ panic("ordinal lookup by negative num")
+ }
+ offset := indexR.OrdinalLookup(uint64(id))
+ g := a.files[index].src.decompressor.MakeGetter()
+ g.Reset(offset)
+ if g.HasNext() {
+ word, _ = g.Next(word[:0])
+ return word, true, nil
+ }
+		return nil, false, fmt.Errorf("entity get error: %s expected %d in snapshot %s but not found", ap.a.Name(), entityNum, ap._visible[index].src.decompressor.FileName())
+ }
+
+ return nil, false, nil
+}
diff --git a/erigon-lib/state/relations.go b/erigon-lib/state/relations.go
new file mode 100644
index 00000000000..ab1323550a3
--- /dev/null
+++ b/erigon-lib/state/relations.go
@@ -0,0 +1,72 @@
+package state
+
+import (
+ "github.com/erigontech/erigon-lib/kv"
+ ae "github.com/erigontech/erigon-lib/state/appendable_extras"
+)
+
+//// relations
+
+var _ RootRelationI = (*PointRelation)(nil)
+var _ RootRelationI = (*ManyToOneRelation)(nil)
+var _ RootRelationI = (*OneToManyRelation)(nil)
+
+// 1:1; RootNum = Num
+type PointRelation struct{}
+
+func (r *PointRelation) RootNum2Num(inp RootNum, tx kv.Tx) (Num, error) {
+ return Num(inp), nil
+}
+
+//////////////////////////////////////////////
+
+// many:1; EntityEnds tbl: start RootNum -> num
+// also id == num here (only canonical data)
+type ManyToOneRelation struct {
+ entityEndsTbl string
+}
+
+func (r *ManyToOneRelation) RootNum2Num(inp RootNum, tx kv.Tx) (Num, error) {
+ c, err := tx.Cursor(r.entityEndsTbl)
+ if err != nil {
+ return 0, err
+ }
+ defer c.Close()
+
+ _, v, err := c.Seek(inp.EncTo8Bytes())
+ if err != nil {
+ return 0, err
+ }
+
+ return Num(ae.Decode64FromBytes(v, true)), nil
+}
+
+//////////////////////////////////////////////
+
+// 1:many; with MaxNumTbl
+// e.g. borevents
+// also id == num here (only canonical data)
+type OneToManyRelation struct {
+ maxNumTbl string
+}
+
+// returns 1st num present in the given inp RootNum
+func (r *OneToManyRelation) RootNum2Num(inp RootNum, tx kv.Tx) (Num, error) {
+ prevMaxNum, err := tx.GetOne(r.maxNumTbl, ae.EncToBytes(uint64(inp)-1, true))
+ if err != nil {
+ return 0, err
+ }
+
+ return Num(ae.Decode64FromBytes(prevMaxNum, true) + 1), nil
+}
+
+// // 1: many; pure function
+// // e.g: spans
+// // no non-canonical data (id == num)
+// type OneToManyPureRelation struct {
+// fn func(inp RootNum) Num
+// }
+
+// func (r *OneToManyPureRelation) RootNum2Num(inp RootNum, tx kv.Tx) (Num, error) {
+// return r.fn(inp), nil
+// }
diff --git a/erigon-lib/state/simple_freezer.go b/erigon-lib/state/simple_freezer.go
new file mode 100644
index 00000000000..7f1f3dcd993
--- /dev/null
+++ b/erigon-lib/state/simple_freezer.go
@@ -0,0 +1,59 @@
+package state
+
+import (
+ "bytes"
+ "context"
+
+ "github.com/erigontech/erigon-lib/common/hexutil"
+ "github.com/erigontech/erigon-lib/kv"
+)
+
+// default freezer implementation for relational appendables (which have RootRelationI)
+// implements Freezer interface
+type SimpleRelationalFreezer struct {
+ rel RootRelationI
+ valsTbl string
+ coll Collector
+}
+
+func (sf *SimpleRelationalFreezer) Freeze(ctx context.Context, from, to RootNum, db kv.RoDB) error {
+ tx, err := db.BeginRo(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ _entityIdFrom, err := sf.rel.RootNum2Num(from, tx)
+ if err != nil {
+ return err
+ }
+ entityIdFrom := hexutil.EncodeTs(uint64(_entityIdFrom))
+
+ _entityIdTo, err := sf.rel.RootNum2Num(to, tx)
+ if err != nil {
+ return err
+ }
+ entityIdTo := hexutil.EncodeTs(uint64(_entityIdTo))
+
+ cursor, err := tx.Cursor(sf.valsTbl)
+ if err != nil {
+ return err
+ }
+
+ defer cursor.Close()
+
+ // bytes.Compare assume big endianness
+	for k, v, err := cursor.Seek(entityIdFrom); k != nil && bytes.Compare(k, entityIdTo) < 0; k, v, err = cursor.Next() {
+ if err != nil {
+ return err
+ }
+ if err := sf.coll(v); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (sf *SimpleRelationalFreezer) SetCollector(coll Collector) {
+ sf.coll = coll
+}
diff --git a/erigon-lib/state/simple_index_builder.go b/erigon-lib/state/simple_index_builder.go
new file mode 100644
index 00000000000..6c43288f27d
--- /dev/null
+++ b/erigon-lib/state/simple_index_builder.go
@@ -0,0 +1,296 @@
+package state
+
+import (
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/erigontech/erigon-lib/common/background"
+ "github.com/erigontech/erigon-lib/common/dbg"
+ "github.com/erigontech/erigon-lib/downloader/snaptype"
+ "github.com/erigontech/erigon-lib/kv/stream"
+ "github.com/erigontech/erigon-lib/log/v3"
+ "github.com/erigontech/erigon-lib/recsplit"
+ "github.com/erigontech/erigon-lib/seg"
+ ae "github.com/erigontech/erigon-lib/state/appendable_extras"
+)
+
+// interfaces defined here are not required to be implemented in
+// appendables. These are just helpers when SimpleAccessorBuilder is used. Also can be used to provide some structure
+// to build more custom indexes.
+type IndexInputDataQuery interface {
+ GetStream(ctx context.Context) stream.Trio[[]byte, uint64, uint64] // (word/value, index, offset)
+ GetBaseDataId() uint64
+ GetCount() uint64
+ Close()
+}
+
+type IndexKeyFactory interface {
+ // IndexInputDataQuery elements passed here to create key for index
+ // `value` is snapshot element;
+ // `index` is the corresponding sequence number in the file.
+ Refresh()
+ Make(value []byte, index uint64) []byte
+ Close()
+}
+
+type AccessorArgs struct {
+ Enums bool
+ LessFalsePositives bool
+ Nofsync bool
+
+ BucketSize int
+ LeafSize uint16
+
+ // other config options for recsplit
+}
+
+func NewAccessorArgs(enums, lessFalsePositives bool) *AccessorArgs {
+ return &AccessorArgs{
+ Enums: enums,
+ LessFalsePositives: lessFalsePositives,
+ BucketSize: recsplit.DefaultBucketSize,
+ LeafSize: recsplit.DefaultLeafSize,
+ }
+}
+
+// simple accessor index
+// goes through all (value, index) in segment file
+// creates a recsplit index with
+// index.key = kf(value, index)
+// and index.value = offset
+type SimpleAccessorBuilder struct {
+ args *AccessorArgs
+ indexPos uint64
+ id AppendableId
+ kf IndexKeyFactory
+ fetcher FirstEntityNumFetcher
+ logger log.Logger
+}
+
+type FirstEntityNumFetcher = func(from, to RootNum, seg *seg.Decompressor) Num
+
+var _ AccessorIndexBuilder = (*SimpleAccessorBuilder)(nil)
+
+func NewSimpleAccessorBuilder(args *AccessorArgs, id AppendableId, logger log.Logger, options ...AccessorBuilderOptions) *SimpleAccessorBuilder {
+ b := &SimpleAccessorBuilder{
+ args: args,
+ id: id,
+ logger: logger,
+ }
+
+ for _, opt := range options {
+ opt(b)
+ }
+
+ if b.kf == nil {
+ b.kf = &SimpleIndexKeyFactory{num: make([]byte, binary.MaxVarintLen64)}
+ }
+
+ if b.fetcher == nil {
+ // assume rootnum and num is same
+ b.fetcher = func(from, to RootNum, seg *seg.Decompressor) Num {
+ return Num(from)
+ }
+ }
+
+ return b
+}
+
+type AccessorBuilderOptions func(*SimpleAccessorBuilder)
+
+func WithIndexPos(indexPos uint64) AccessorBuilderOptions {
+ return func(s *SimpleAccessorBuilder) {
+ if int(s.indexPos) >= len(s.id.IndexPrefix()) {
+ panic("indexPos greater than indexPrefix length")
+ }
+ s.indexPos = indexPos
+ }
+}
+
+func WithIndexKeyFactory(factory IndexKeyFactory) AccessorBuilderOptions {
+ return func(s *SimpleAccessorBuilder) {
+ s.kf = factory
+ }
+}
+
+func (s *SimpleAccessorBuilder) SetAccessorArgs(args *AccessorArgs) {
+ s.args = args
+}
+
+// TODO: this is supposed to go away once we start storing first entity num in the snapshot
+func (s *SimpleAccessorBuilder) SetFirstEntityNumFetcher(fetcher FirstEntityNumFetcher) {
+ s.fetcher = fetcher
+}
+
+func (s *SimpleAccessorBuilder) GetInputDataQuery(from, to RootNum) *DecompressorIndexInputDataQuery {
+ sgname := ae.SnapFilePath(s.id, snaptype.Version(1), from, to)
+ decomp, _ := seg.NewDecompressor(sgname)
+ return &DecompressorIndexInputDataQuery{decomp: decomp, baseDataId: uint64(s.fetcher(from, to, decomp))}
+}
+
+func (s *SimpleAccessorBuilder) SetIndexKeyFactory(factory IndexKeyFactory) {
+ s.kf = factory
+}
+
+func (s *SimpleAccessorBuilder) AllowsOrdinalLookupByNum() bool {
+ return s.args.Enums
+}
+
+func (s *SimpleAccessorBuilder) Build(ctx context.Context, from, to RootNum, p *background.Progress) (i *recsplit.Index, err error) {
+ defer func() {
+ if rec := recover(); rec != nil {
+ err = fmt.Errorf("%s: at=%d-%d, %v, %s", s.id.IndexPrefix()[s.indexPos], from, to, rec, dbg.Stack())
+ }
+ }()
+ iidq := s.GetInputDataQuery(from, to)
+ defer iidq.Close()
+ idxFile := ae.IdxFilePath(s.id, snaptype.Version(1), from, to, s.indexPos)
+
+ keyCount := iidq.GetCount()
+ if p != nil {
+ p.Name.Store(&idxFile)
+ p.Total.Store(keyCount)
+ }
+ salt, err := s.id.Salt()
+ if err != nil {
+ return nil, err
+ }
+
+ rs, err := recsplit.NewRecSplit(recsplit.RecSplitArgs{
+ KeyCount: int(keyCount),
+ Enums: s.args.Enums,
+ BucketSize: s.args.BucketSize,
+ LeafSize: s.args.LeafSize,
+ IndexFile: idxFile,
+ Salt: &salt,
+ NoFsync: s.args.Nofsync,
+ TmpDir: s.id.Dirs().Tmp,
+ LessFalsePositives: s.args.LessFalsePositives,
+ BaseDataID: iidq.GetBaseDataId(),
+ }, s.logger)
+ if err != nil {
+ return nil, err
+ }
+
+ s.kf.Refresh()
+ defer s.kf.Close()
+
+ defer iidq.decomp.EnableReadAhead().DisableReadAhead()
+
+ for {
+ stream := iidq.GetStream(ctx)
+ defer stream.Close()
+ for stream.HasNext() {
+ word, index, offset, err := stream.Next()
+ if err != nil {
+ return nil, err
+ }
+ if p != nil {
+ p.Processed.Add(1)
+ }
+ key := s.kf.Make(word, index)
+ if err = rs.AddKey(key, offset); err != nil {
+ return nil, err
+ }
+ select {
+ case <-ctx.Done():
+ stream.Close()
+ return nil, ctx.Err()
+ default:
+ }
+ }
+ stream.Close()
+ if err = rs.Build(ctx); err != nil {
+			if p != nil { p.Processed.CompareAndSwap(p.Processed.Load(), 0) }
+ // collision handling
+ if errors.Is(err, recsplit.ErrCollision) {
+ rs.ResetNextSalt()
+ continue
+ }
+ return nil, err
+ }
+
+ break
+ }
+
+ return recsplit.OpenIndex(idxFile)
+
+}
+
+type DecompressorIndexInputDataQuery struct {
+ decomp *seg.Decompressor
+ baseDataId uint64
+}
+
+// return trio: word, index, offset,
+func (d *DecompressorIndexInputDataQuery) GetStream(ctx context.Context) stream.Trio[[]byte, uint64, uint64] {
+ // open seg if not yet
+ return &seg_stream{ctx: ctx, g: d.decomp.MakeGetter(), word: make([]byte, 0, 4096)}
+}
+
+func (d *DecompressorIndexInputDataQuery) GetBaseDataId() uint64 {
+ // discuss: adding base data id to snapshotfile?
+ // or might need to add callback to get first basedataid...
+	return d.baseDataId
+ //return d.from
+}
+
+func (d *DecompressorIndexInputDataQuery) GetCount() uint64 {
+ return uint64(d.decomp.Count())
+}
+
+func (d *DecompressorIndexInputDataQuery) Close() {
+ d.decomp.Close()
+ d.decomp = nil
+}
+
+type seg_stream struct {
+ g *seg.Getter
+ i, offset uint64
+ ctx context.Context
+ word []byte
+}
+
+func (s *seg_stream) Next() (word []byte, index uint64, offset uint64, err error) {
+ // check if ctx is done...
+ if s.g.HasNext() {
+ word, nextPos := s.g.Next(s.word[:0])
+ defer func() {
+ s.offset = nextPos
+ s.i++
+ }()
+ return word, s.i, s.offset, nil
+ }
+ return nil, 0, 0, io.EOF
+}
+
+func (s *seg_stream) HasNext() bool {
+ return s.g.HasNext()
+}
+
+func (s *seg_stream) Close() {
+ if s.g == nil {
+ return
+ }
+ s.g = nil
+}
+
+// index key factory "manufacturing" index keys only
+type SimpleIndexKeyFactory struct {
+ num []byte
+}
+
+func (n *SimpleIndexKeyFactory) Refresh() {}
+
+func (n *SimpleIndexKeyFactory) Make(_ []byte, index uint64) []byte {
+	// everywhere except heimdall indexes, which use BigEndian format
+ nm := binary.PutUvarint(n.num, index)
+ return n.num[:nm]
+}
+
+func (n *SimpleIndexKeyFactory) Close() {
+ n.num = []byte{}
+}
diff --git a/erigon-lib/state/squeeze.go b/erigon-lib/state/squeeze.go
index 79e9a942db4..72f6595c6c9 100644
--- a/erigon-lib/state/squeeze.go
+++ b/erigon-lib/state/squeeze.go
@@ -117,19 +117,19 @@ func (at *AggregatorRoTx) SqueezeCommitmentFiles() error {
name: kv.AccountsDomain,
values: MergeRange{"", true, 0, math.MaxUint64},
history: HistoryRanges{},
- aggStep: at.a.StepSize(),
+ aggStep: at.StepSize(),
},
kv.StorageDomain: {
name: kv.StorageDomain,
values: MergeRange{"", true, 0, math.MaxUint64},
history: HistoryRanges{},
- aggStep: at.a.StepSize(),
+ aggStep: at.StepSize(),
},
kv.CommitmentDomain: {
name: kv.CommitmentDomain,
values: MergeRange{"", true, 0, math.MaxUint64},
history: HistoryRanges{},
- aggStep: at.a.StepSize(),
+ aggStep: at.StepSize(),
},
},
}
diff --git a/erigon-lib/tools/golangci_lint.sh b/erigon-lib/tools/golangci_lint.sh
index 9e1a7577979..77f989ec44b 100755
--- a/erigon-lib/tools/golangci_lint.sh
+++ b/erigon-lib/tools/golangci_lint.sh
@@ -2,7 +2,7 @@
scriptDir=$(dirname "${BASH_SOURCE[0]}")
scriptName=$(basename "${BASH_SOURCE[0]}")
-version="v1.63.4"
+version="v1.64.6"
if [[ "$1" == "--install-deps" ]]
then
diff --git a/eth/backend.go b/eth/backend.go
index c5de8c5c287..a2a2f744eb4 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -63,6 +63,7 @@ import (
"github.com/erigontech/erigon-lib/downloader/downloadercfg"
"github.com/erigontech/erigon-lib/downloader/downloadergrpc"
"github.com/erigontech/erigon-lib/downloader/snaptype"
+ "github.com/erigontech/erigon-lib/event"
protodownloader "github.com/erigontech/erigon-lib/gointerfaces/downloaderproto"
"github.com/erigontech/erigon-lib/gointerfaces/grpcutil"
remote "github.com/erigontech/erigon-lib/gointerfaces/remoteproto"
@@ -131,7 +132,6 @@ import (
"github.com/erigontech/erigon/turbo/silkworm"
"github.com/erigontech/erigon/turbo/snapshotsync/freezeblocks"
stages2 "github.com/erigontech/erigon/turbo/stages"
- "github.com/erigontech/erigon/turbo/stages/headerdownload"
"github.com/erigontech/erigon/txnprovider"
"github.com/erigontech/erigon/txnprovider/shutter"
"github.com/erigontech/erigon/txnprovider/txpool"
@@ -174,9 +174,10 @@ type Ethereum struct {
rpcFilters *rpchelper.Filters
rpcDaemonStateCache kvcache.Cache
- miningSealingQuit chan struct{}
- pendingBlocks chan *types.Block
- minedBlocks chan *types.Block
+ miningSealingQuit chan struct{}
+ pendingBlocks chan *types.Block
+ minedBlocks chan *types.Block
+ minedBlockObservers *event.Observers[*types.Block]
sentryCtx context.Context
sentryCancel context.CancelFunc
@@ -288,6 +289,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
blockBuilderNotifyNewTxns: make(chan struct{}, 1),
miningSealingQuit: make(chan struct{}),
minedBlocks: make(chan *types.Block, 1),
+ minedBlockObservers: event.NewObservers[*types.Block](),
logger: logger,
stopNode: func() error {
return stack.Close()
@@ -760,6 +762,9 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
recents = bor.Recents
signatures = bor.Signatures
}
+
+ astridEnabled := chainConfig.Bor != nil && config.PolygonSync
+
// proof-of-work mining
mining := stagedsync.New(
config.Sync,
@@ -800,6 +805,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
stagedsync.StageSendersCfg(backend.chainDB, chainConfig, config.Sync, false, dirs.Tmp, config.Prune, blockReader, backend.sentriesClient.Hd),
stagedsync.StageMiningExecCfg(backend.chainDB, miner, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, nil, 0, txnProvider, blockReader),
stagedsync.StageMiningFinishCfg(backend.chainDB, *backend.chainConfig, backend.engine, miner, backend.miningSealingQuit, backend.blockReader, latestBlockBuiltStore),
+ astridEnabled,
), stagedsync.MiningUnwindOrder, stagedsync.MiningPruneOrder,
logger, stages.ModeBlockProduction)
@@ -845,7 +851,9 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
),
stagedsync.StageSendersCfg(backend.chainDB, chainConfig, config.Sync, false, dirs.Tmp, config.Prune, blockReader, backend.sentriesClient.Hd),
stagedsync.StageMiningExecCfg(backend.chainDB, miningStatePos, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, interrupt, param.PayloadId, txnProvider, blockReader),
- stagedsync.StageMiningFinishCfg(backend.chainDB, *backend.chainConfig, backend.engine, miningStatePos, backend.miningSealingQuit, backend.blockReader, latestBlockBuiltStore)), stagedsync.MiningUnwindOrder, stagedsync.MiningPruneOrder, logger, stages.ModeBlockProduction)
+ stagedsync.StageMiningFinishCfg(backend.chainDB, *backend.chainConfig, backend.engine, miningStatePos, backend.miningSealingQuit, backend.blockReader, latestBlockBuiltStore),
+ astridEnabled,
+ ), stagedsync.MiningUnwindOrder, stagedsync.MiningPruneOrder, logger, stages.ModeBlockProduction)
// We start the mining step
if err := stages2.MiningStep(ctx, backend.chainDB, proposingSync, tmpdir, logger); err != nil {
return nil, err
@@ -900,19 +908,14 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
backend.sentriesClient.Bd.AddToPrefetch(b.Header(), b.RawBody())
}
+ backend.minedBlockObservers.Notify(b)
+
//p2p
//backend.sentriesClient.BroadcastNewBlock(context.Background(), b, b.Difficulty())
//rpcdaemon
if err := backend.miningRPC.BroadcastMinedBlock(b); err != nil {
logger.Error("txpool rpc mined block broadcast", "err", err)
}
- logger.Trace("BroadcastMinedBlock successful", "number", b.Number(), "GasUsed", b.GasUsed(), "txn count", b.Transactions().Len())
- backend.sentriesClient.PropagateNewBlockHashes(ctx, []headerdownload.Announce{
- {
- Number: b.NumberU64(),
- Hash: b.Hash(),
- },
- })
case b := <-backend.pendingBlocks:
if err := backend.miningRPC.BroadcastPendingBlock(b); err != nil {
@@ -960,6 +963,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
statusDataProvider,
backend.stopNode,
&engineAPISwitcher{backend: backend},
+ backend,
)
backend.syncUnwindOrder = stagedsync.PolygonSyncUnwindOrder
backend.syncPruneOrder = stagedsync.PolygonSyncPruneOrder
@@ -1051,6 +1055,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
heimdallService,
backend.notifications,
backend.engineBackendRPC,
+ backend,
)
// we need to initiate download before the heimdall services start rather than
@@ -1393,6 +1398,10 @@ func (s *Ethereum) StartMining(ctx context.Context, db kv.RwDB, stateDiffClient
func (s *Ethereum) IsMining() bool { return s.config.Miner.Enabled }
+func (s *Ethereum) RegisterMinedBlockObserver(callback func(msg *types.Block)) event.UnregisterFunc {
+ return s.minedBlockObservers.Register(callback)
+}
+
func (s *Ethereum) ChainKV() kv.RwDB { return s.chainDB }
func (s *Ethereum) NetVersion() (uint64, error) { return s.networkID, nil }
func (s *Ethereum) NetPeerCount() (uint64, error) {
diff --git a/eth/stagedsync/stage_execute.go b/eth/stagedsync/stage_execute.go
index 9c0c9fda10f..e3aaa5976c7 100644
--- a/eth/stagedsync/stage_execute.go
+++ b/eth/stagedsync/stage_execute.go
@@ -32,7 +32,6 @@ import (
"github.com/erigontech/erigon-lib/common"
"github.com/erigontech/erigon-lib/common/datadir"
"github.com/erigontech/erigon-lib/common/dbg"
- "github.com/erigontech/erigon-lib/config3"
"github.com/erigontech/erigon-lib/kv"
"github.com/erigontech/erigon-lib/kv/rawdbv3"
"github.com/erigontech/erigon-lib/kv/temporal"
@@ -398,7 +397,7 @@ func PruneExecutionStage(s *PruneState, tx kv.RwTx, cfg ExecuteBlockCfg, ctx con
}
defer tx.Rollback()
}
- if s.ForwardProgress > config3.MaxReorgDepthV3 && !cfg.syncCfg.AlwaysGenerateChangesets {
+ if s.ForwardProgress > uint64(dbg.MaxReorgDepth) && !cfg.syncCfg.AlwaysGenerateChangesets {
// (chunkLen is 8Kb) * (1_000 chunks) = 8mb
// Some blocks on bor-mainnet have 400 chunks of diff = 3mb
var pruneDiffsLimitOnChainTip = 1_000
@@ -410,7 +409,7 @@ func PruneExecutionStage(s *PruneState, tx kv.RwTx, cfg ExecuteBlockCfg, ctx con
if err := rawdb.PruneTable(
tx,
kv.ChangeSets3,
- s.ForwardProgress-config3.MaxReorgDepthV3,
+ s.ForwardProgress-uint64(dbg.MaxReorgDepth),
ctx,
pruneDiffsLimitOnChainTip,
pruneTimeout,
diff --git a/eth/stagedsync/stage_polygon_sync.go b/eth/stagedsync/stage_polygon_sync.go
index 4c091b60196..f32bf197fe8 100644
--- a/eth/stagedsync/stage_polygon_sync.go
+++ b/eth/stagedsync/stage_polygon_sync.go
@@ -72,6 +72,7 @@ func NewPolygonSyncStageCfg(
userUnwindTypeOverrides []string,
notifications *shards.Notifications,
engineAPISwitcher sync.EngineAPISwitcher,
+ minedBlockReg sync.MinedBlockObserverRegistrar,
) PolygonSyncStageCfg {
// using a buffered channel to preserve order of tx actions,
// do not expect to ever have more than 50 goroutines blocking on this channel
@@ -138,7 +139,7 @@ func NewPolygonSyncStageCfg(
syncStore,
blockLimit,
)
- events := polygonsync.NewTipEvents(logger, p2pService, heimdallService)
+ events := polygonsync.NewTipEvents(logger, p2pService, heimdallService, minedBlockReg)
sync := polygonsync.NewSync(
config,
logger,
diff --git a/eth/stagedsync/stagebuilder.go b/eth/stagedsync/stagebuilder.go
index cbde166e0ea..c39dfd3d019 100644
--- a/eth/stagedsync/stagebuilder.go
+++ b/eth/stagedsync/stagebuilder.go
@@ -43,6 +43,7 @@ func MiningStages(
sendersCfg SendersCfg,
execCfg MiningExecCfg,
finish MiningFinishCfg,
+ astridEnabled bool,
) []*Stage {
return []*Stage{
{
@@ -71,6 +72,7 @@ func MiningStages(
Prune: func(p *PruneState, tx kv.RwTx, logger log.Logger) error {
return nil
},
+ Disabled: astridEnabled,
},
{
ID: stages.MiningExecution,
diff --git a/eth/stagedsync/stagedsynctest/harness.go b/eth/stagedsync/stagedsynctest/harness.go
index 69865589fe6..2f5003bcecc 100644
--- a/eth/stagedsync/stagedsynctest/harness.go
+++ b/eth/stagedsync/stagedsynctest/harness.go
@@ -103,6 +103,7 @@ func InitHarness(ctx context.Context, t *testing.T, cfg HarnessCfg) Harness {
stagedsync.SendersCfg{},
stagedsync.MiningExecCfg{},
stagedsync.MiningFinishCfg{},
+ false,
)
miningSync := stagedsync.New(
ethconfig.Defaults.Sync,
diff --git a/go.mod b/go.mod
index d72f408c8f9..94bcbad5b3b 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,8 @@
module github.com/erigontech/erigon
-go 1.22.12
+go 1.23.0
+
+toolchain go1.23.6
replace github.com/erigontech/erigon-lib => ./erigon-lib
@@ -90,12 +92,12 @@ require (
github.com/xsleonard/go-merkle v1.1.0
go.uber.org/mock v0.5.0
go.uber.org/zap v1.27.0
- golang.org/x/crypto v0.33.0
- golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c
- golang.org/x/net v0.35.0
- golang.org/x/sync v0.11.0
- golang.org/x/sys v0.30.0
- golang.org/x/time v0.10.0
+ golang.org/x/crypto v0.36.0
+ golang.org/x/exp v0.0.0-20250305212735-054e65f0b394
+ golang.org/x/net v0.37.0
+ golang.org/x/sync v0.12.0
+ golang.org/x/sys v0.31.0
+ golang.org/x/time v0.11.0
google.golang.org/grpc v1.69.4
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1
google.golang.org/protobuf v1.36.5
@@ -281,9 +283,9 @@ require (
go.uber.org/dig v1.18.0 // indirect
go.uber.org/fx v1.23.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
- golang.org/x/mod v0.23.0 // indirect
- golang.org/x/text v0.22.0 // indirect
- golang.org/x/tools v0.30.0
+ golang.org/x/mod v0.24.0 // indirect
+ golang.org/x/text v0.23.0 // indirect
+ golang.org/x/tools v0.31.0
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
lukechampine.com/blake3 v1.3.0 // indirect
modernc.org/libc v1.61.13 // indirect
diff --git a/go.sum b/go.sum
index 7f766659859..1f1fa3b59a3 100644
--- a/go.sum
+++ b/go.sum
@@ -971,8 +971,8 @@ golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
-golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus=
-golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
+golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
+golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -983,8 +983,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c h1:KL/ZBHXgKGVmuZBZ01Lt57yE5ws8ZPSkkihmEyq7FXc=
-golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU=
+golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw=
+golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1012,8 +1012,8 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
-golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
+golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1063,8 +1063,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
-golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
-golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
+golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
+golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1090,8 +1090,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
-golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
+golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1161,8 +1161,8 @@ golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
-golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
+golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
@@ -1186,14 +1186,14 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
-golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
+golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
+golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4=
-golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
+golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1250,8 +1250,8 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
-golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
+golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU=
+golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/params/config.go b/params/config.go
index a0bdcccfbb6..e717630ad4e 100644
--- a/params/config.go
+++ b/params/config.go
@@ -113,6 +113,7 @@ var (
TerminalTotalDifficultyPassed: true,
ShanghaiTime: big.NewInt(0),
CancunTime: big.NewInt(0),
+ PragueTime: big.NewInt(0),
Ethash: new(chain.EthashConfig),
}
diff --git a/polygon/bor/spanner.go b/polygon/bor/spanner.go
index 78a27552d4d..ff79f0e0f35 100644
--- a/polygon/bor/spanner.go
+++ b/polygon/bor/spanner.go
@@ -18,6 +18,7 @@ package bor
import (
"encoding/hex"
+ "errors"
"math/big"
"github.com/erigontech/erigon-lib/log/v3"
@@ -128,6 +129,10 @@ func (c *ChainSpanner) GetCurrentProducers(spanId uint64, chain ChainHeaderReade
span := chain.BorSpan(spanId)
+ if span == nil {
+ return nil, errors.New("no span found")
+ }
+
producers := make([]*valset.Validator, len(span.SelectedProducers))
for i := range span.SelectedProducers {
producers[i] = &span.SelectedProducers[i]
diff --git a/polygon/sync/service.go b/polygon/sync/service.go
index 8138433decf..4a2536feb75 100644
--- a/polygon/sync/service.go
+++ b/polygon/sync/service.go
@@ -51,6 +51,7 @@ func NewService(
heimdallService *heimdall.Service,
notifications *shards.Notifications,
engineAPISwitcher EngineAPISwitcher,
+ minedBlockReg MinedBlockObserverRegistrar,
) *Service {
borConfig := chainConfig.Bor.(*borcfg.BorConfig)
@@ -77,7 +78,7 @@ func NewService(
blockLimit,
)
ccBuilderFactory := NewCanonicalChainBuilderFactory(chainConfig, borConfig, heimdallService, signaturesCache)
- events := NewTipEvents(logger, p2pService, heimdallService)
+ events := NewTipEvents(logger, p2pService, heimdallService, minedBlockReg)
sync := NewSync(
config,
logger,
diff --git a/polygon/sync/sync.go b/polygon/sync/sync.go
index bd2cf823ba5..9ae01d2ba03 100644
--- a/polygon/sync/sync.go
+++ b/polygon/sync/sync.go
@@ -393,6 +393,11 @@ func (s *Sync) applyNewBlockOnTip(ctx context.Context, event EventNewBlock, ccb
return err
}
+ if event.Source == EventSourceBlockProducer {
+ go s.publishNewBlock(ctx, event.NewBlock)
+ go s.p2pService.PublishNewBlockHashes(event.NewBlock)
+ }
+
if event.Source == EventSourceP2PNewBlock {
// https://github.com/ethereum/devp2p/blob/master/caps/eth.md#block-propagation
// devp2p spec: when a NewBlock announcement message is received from a peer, the client first verifies the
diff --git a/polygon/sync/tip_events.go b/polygon/sync/tip_events.go
index c9175a518fb..d5016ee825b 100644
--- a/polygon/sync/tip_events.go
+++ b/polygon/sync/tip_events.go
@@ -42,6 +42,7 @@ type EventSource string
const EventSourceP2PNewBlockHashes EventSource = "p2p-new-block-hashes-source"
const EventSourceP2PNewBlock EventSource = "p2p-new-block-source"
+const EventSourceBlockProducer EventSource = "mined-block-producer"
type EventTopic string
@@ -114,25 +115,31 @@ type heimdallObserverRegistrar interface {
RegisterMilestoneObserver(callback func(*heimdall.Milestone), opts ...heimdall.ObserverOption) event.UnregisterFunc
}
-func NewTipEvents(logger log.Logger, p2pReg p2pObserverRegistrar, heimdallReg heimdallObserverRegistrar) *TipEvents {
+type MinedBlockObserverRegistrar interface {
+ RegisterMinedBlockObserver(callback func(*types.Block)) event.UnregisterFunc
+}
+
+func NewTipEvents(logger log.Logger, p2pReg p2pObserverRegistrar, heimdallReg heimdallObserverRegistrar, minedBlockReg MinedBlockObserverRegistrar) *TipEvents {
heimdallEventsChannel := NewEventChannel[Event](10, WithEventChannelLogging(logger, log.LvlTrace, EventTopicHeimdall.String()))
p2pEventsChannel := NewEventChannel[Event](1000, WithEventChannelLogging(logger, log.LvlTrace, EventTopicP2P.String()))
compositeEventsChannel := NewTipEventsCompositeChannel(heimdallEventsChannel, p2pEventsChannel)
return &TipEvents{
- logger: logger,
- events: compositeEventsChannel,
- p2pObserverRegistrar: p2pReg,
- heimdallObserverRegistrar: heimdallReg,
- blockEventsSpamGuard: newBlockEventsSpamGuard(logger),
+ logger: logger,
+ events: compositeEventsChannel,
+ p2pObserverRegistrar: p2pReg,
+ heimdallObserverRegistrar: heimdallReg,
+ minedBlockObserverRegistrar: minedBlockReg,
+ blockEventsSpamGuard: newBlockEventsSpamGuard(logger),
}
}
type TipEvents struct {
- logger log.Logger
- events *TipEventsCompositeChannel
- p2pObserverRegistrar p2pObserverRegistrar
- heimdallObserverRegistrar heimdallObserverRegistrar
- blockEventsSpamGuard blockEventsSpamGuard
+ logger log.Logger
+ events *TipEventsCompositeChannel
+ p2pObserverRegistrar p2pObserverRegistrar
+ heimdallObserverRegistrar heimdallObserverRegistrar
+ minedBlockObserverRegistrar MinedBlockObserverRegistrar
+ blockEventsSpamGuard blockEventsSpamGuard
}
func (te *TipEvents) Events() <-chan Event {
@@ -142,6 +149,23 @@ func (te *TipEvents) Events() <-chan Event {
func (te *TipEvents) Run(ctx context.Context) error {
te.logger.Info(syncLogPrefix("running tip events component"))
+ newMinedBlockObserverCancel := te.minedBlockObserverRegistrar.RegisterMinedBlockObserver(func(msg *types.Block) {
+ te.logger.Trace(
+ "[tip-events] mined block event received from block producer",
+ "hash", msg.Hash(),
+ "number", msg.NumberU64(),
+ )
+
+ te.events.PushEvent(Event{
+ Type: EventTypeNewBlock,
+ newBlock: EventNewBlock{
+ NewBlock: msg,
+ Source: EventSourceBlockProducer,
+ },
+ })
+ })
+ defer newMinedBlockObserverCancel()
+
newBlockObserverCancel := te.p2pObserverRegistrar.RegisterNewBlockObserver(func(message *p2p.DecodedInboundMessage[*eth.NewBlockPacket]) {
block := message.Decoded.Block
diff --git a/rules.go b/rules.go
index 6f378fa5579..e59315efe6d 100644
--- a/rules.go
+++ b/rules.go
@@ -60,7 +60,7 @@ func txDeferRollback(m dsl.Matcher) {
Where(!m["rollback"].Text.Matches(`defer .*\.Rollback()`)).
//At(m["rollback"]).
Report(`Add "defer $tx.Rollback()" right after transaction creation error check.
- If you are in the loop - consider use "$db.View" or "$db.Update" or extract whole transaction to function.
+ If you are in the loop - consider using "$db.View" or "$db.Update" or extract whole transaction to function.
Without rollback in defer - app can deadlock on error or panic.
Rules are in ./rules.go file.
`)
@@ -126,12 +126,12 @@ func mismatchingUnlock(m dsl.Matcher) {
m.Match(`$mu.Lock(); defer $mu.$unlock()`).
Where(m["unlock"].Text == "RUnlock").
At(m["unlock"]).
- Report(`maybe $mu.Unlock() was intended?
+ Report(`Did you mean $mu.Unlock()?
Rules are in ./rules.go file.`)
m.Match(`$mu.RLock(); defer $mu.$unlock()`).
Where(m["unlock"].Text == "Unlock").
At(m["unlock"]).
- Report(`maybe $mu.RUnlock() was intended?
+ Report(`Did you mean $mu.RUnlock()?
Rules are in ./rules.go file.`)
}
diff --git a/turbo/engineapi/engine_api_methods.go b/turbo/engineapi/engine_api_methods.go
index 37544518ad4..e68a7a2fbc4 100644
--- a/turbo/engineapi/engine_api_methods.go
+++ b/turbo/engineapi/engine_api_methods.go
@@ -6,6 +6,7 @@ import (
libcommon "github.com/erigontech/erigon-lib/common"
"github.com/erigontech/erigon-lib/common/hexutil"
+ "github.com/erigontech/erigon-lib/gointerfaces/txpoolproto"
"github.com/erigontech/erigon/params"
"github.com/erigontech/erigon/cl/clparams"
@@ -166,3 +167,9 @@ func (e *EngineServer) ExchangeCapabilities(fromCl []string) []string {
return ourCapabilities
}
+
+func (e *EngineServer) GetBlobsV1(ctx context.Context, blobHashes []libcommon.Hash) ([]*txpoolproto.BlobAndProofV1, error) {
+ e.logger.Debug("[engine_getBlobsV1] Received Request", "hashes", len(blobHashes))
+ return e.getBlobs(ctx, blobHashes)
+
+}
diff --git a/turbo/engineapi/engine_server.go b/turbo/engineapi/engine_server.go
index c4f39d7817f..6bb7728f316 100644
--- a/turbo/engineapi/engine_server.go
+++ b/turbo/engineapi/engine_server.go
@@ -31,7 +31,9 @@ import (
"github.com/erigontech/erigon-lib/common/math"
"github.com/erigontech/erigon-lib/gointerfaces"
execution "github.com/erigontech/erigon-lib/gointerfaces/executionproto"
+ "github.com/erigontech/erigon-lib/gointerfaces/txpoolproto"
txpool "github.com/erigontech/erigon-lib/gointerfaces/txpoolproto"
+ "github.com/erigontech/erigon-lib/gointerfaces/typesproto"
"github.com/erigontech/erigon-lib/kv"
"github.com/erigontech/erigon-lib/kv/kvcache"
"github.com/erigontech/erigon-lib/log/v3"
@@ -68,6 +70,7 @@ type EngineServer struct {
test bool
caplin bool // we need to send errors for caplin.
executionService execution.ExecutionClient
+ txpool txpool.TxpoolClient // needed for getBlobs
chainRW eth1_chain_reader.ChainReaderWriterEth1
lock sync.Mutex
@@ -118,11 +121,9 @@ func (e *EngineServer) Start(
e.engineLogSpamer.Start(ctx)
}
base := jsonrpc.NewBaseApi(filters, stateCache, blockReader, httpConfig.WithDatadir, httpConfig.EvmCallTimeout, engineReader, httpConfig.Dirs, nil)
-
ethImpl := jsonrpc.NewEthAPI(base, db, eth, txPool, mining, httpConfig.Gascap, httpConfig.Feecap, httpConfig.ReturnDataLimit, httpConfig.AllowUnprotectedTxs, httpConfig.MaxGetProofRewindBlockCount, httpConfig.WebsocketSubscribeLogsChannelSize, e.logger)
+ e.txpool = txPool
- // engineImpl := NewEngineAPI(base, db, engineBackend)
- // e.startEngineMessageHandler()
apiList := []rpc.API{
{
Namespace: "eth",
@@ -885,6 +886,22 @@ func (e *EngineServer) SetConsuming(consuming bool) {
e.consuming.Store(consuming)
}
+func (e *EngineServer) getBlobs(ctx context.Context, blobHashes []libcommon.Hash) ([]*txpoolproto.BlobAndProofV1, error) {
+ if len(blobHashes) > 128 {
+ return nil, &engine_helpers.TooLargeRequestErr
+ }
+ req := &txpool.GetBlobsRequest{BlobHashes: make([]*typesproto.H256, len(blobHashes))}
+ for i := range blobHashes {
+ req.BlobHashes[i] = gointerfaces.ConvertHashToH256(blobHashes[i])
+ }
+ res, err := e.txpool.GetBlobs(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+
+ return res.BlobsAndProofs, nil
+}
+
func waitForStuff(maxWait time.Duration, waitCondnF func() (bool, error)) (bool, error) {
shouldWait, err := waitCondnF()
if err != nil || !shouldWait {
diff --git a/turbo/engineapi/engine_server_test.go b/turbo/engineapi/engine_server_test.go
new file mode 100644
index 00000000000..a69658b1dd0
--- /dev/null
+++ b/turbo/engineapi/engine_server_test.go
@@ -0,0 +1,128 @@
+// Copyright 2025 The Erigon Authors
+// This file is part of Erigon.
+//
+// Erigon is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Erigon is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with Erigon. If not, see <http://www.gnu.org/licenses/>.
+
+package engineapi
+
+import (
+ "bytes"
+ "math/big"
+ "testing"
+
+ "github.com/erigontech/erigon-lib/direct"
+ "github.com/holiman/uint256"
+ "github.com/stretchr/testify/require"
+
+ "github.com/erigontech/erigon-lib/common"
+ sentry "github.com/erigontech/erigon-lib/gointerfaces/sentryproto"
+ txpool "github.com/erigontech/erigon-lib/gointerfaces/txpoolproto"
+
+ "github.com/erigontech/erigon-lib/kv/kvcache"
+ "github.com/erigontech/erigon-lib/log/v3"
+ "github.com/erigontech/erigon-lib/rlp"
+ "github.com/erigontech/erigon-lib/wrap"
+ "github.com/erigontech/erigon/cmd/rpcdaemon/cli/httpcfg"
+ "github.com/erigontech/erigon/cmd/rpcdaemon/rpcdaemontest"
+ "github.com/erigontech/erigon/cmd/rpcdaemon/rpcservices"
+ "github.com/erigontech/erigon/core"
+ "github.com/erigontech/erigon/core/types"
+ "github.com/erigontech/erigon/eth/ethconfig"
+ "github.com/erigontech/erigon/eth/protocols/eth"
+
+ "github.com/erigontech/erigon/rpc/rpccfg"
+ "github.com/erigontech/erigon/turbo/jsonrpc"
+ "github.com/erigontech/erigon/turbo/rpchelper"
+ "github.com/erigontech/erigon/turbo/stages"
+ "github.com/erigontech/erigon/turbo/stages/mock"
+)
+
+// Do 1 step to start txPool
+func oneBlockStep(mockSentry *mock.MockSentry, require *require.Assertions, t *testing.T) {
+ chain, err := core.GenerateChain(mockSentry.ChainConfig, mockSentry.Genesis, mockSentry.Engine, mockSentry.DB, 1 /*number of blocks:*/, func(i int, b *core.BlockGen) {
+ b.SetCoinbase(common.Address{1})
+ })
+ require.NoError(err)
+
+ // Send NewBlock message
+ b, err := rlp.EncodeToBytes(ð.NewBlockPacket{
+ Block: chain.TopBlock,
+ TD: big.NewInt(1), // This is ignored anyway
+ })
+ require.NoError(err)
+
+ mockSentry.ReceiveWg.Add(1)
+ for _, err = range mockSentry.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: mockSentry.PeerId}) {
+ require.NoError(err)
+ }
+ // Send all the headers
+ b, err = rlp.EncodeToBytes(ð.BlockHeadersPacket66{
+ RequestId: 1,
+ BlockHeadersPacket: chain.Headers,
+ })
+ require.NoError(err)
+ mockSentry.ReceiveWg.Add(1)
+ for _, err = range mockSentry.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: mockSentry.PeerId}) {
+ require.NoError(err)
+ }
+ mockSentry.ReceiveWg.Wait() // Wait for all messages to be processed before we proceed
+
+ initialCycle, firstCycle := mock.MockInsertAsInitialCycle, false
+ if err := stages.StageLoopIteration(mockSentry.Ctx, mockSentry.DB, wrap.TxContainer{}, mockSentry.Sync, initialCycle, firstCycle, log.New(), mockSentry.BlockReader, nil); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func newBaseApiForTest(m *mock.MockSentry) *jsonrpc.BaseAPI {
+ stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
+ return jsonrpc.NewBaseApi(nil, stateCache, m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs, nil)
+}
+
+func TestGetBlobsV1(t *testing.T) {
+ logger := log.New()
+ buf := bytes.NewBuffer(nil)
+ mockSentry, require := mock.MockWithTxPoolCancun(t), require.New(t)
+ oneBlockStep(mockSentry, require, t)
+
+ wrappedTxn := types.MakeWrappedBlobTxn(uint256.MustFromBig(mockSentry.ChainConfig.ChainID))
+ txn, err := types.SignTx(wrappedTxn, *types.LatestSignerForChainID(mockSentry.ChainConfig.ChainID), mockSentry.Key)
+ require.NoError(err)
+ dt := &wrappedTxn.Tx.DynamicFeeTransaction
+ v, r, s := txn.RawSignatureValues()
+ dt.V.Set(v)
+ dt.R.Set(r)
+ dt.S.Set(s)
+
+ ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, mockSentry)
+ txPool := txpool.NewTxpoolClient(conn)
+ ff := rpchelper.New(ctx, rpchelper.DefaultFiltersConfig, nil, txPool, txpool.NewMiningClient(conn), func() {}, mockSentry.Log)
+ api := jsonrpc.NewEthAPI(newBaseApiForTest(mockSentry), mockSentry.DB, nil, txPool, nil, 5000000, ethconfig.Defaults.RPCTxFeeCap, 100_000, false, 100_000, 128, logger)
+
+ executionRpc := direct.NewExecutionClientDirect(mockSentry.Eth1ExecutionService)
+ eth := rpcservices.NewRemoteBackend(nil, mockSentry.DB, mockSentry.BlockReader)
+ engineServer := NewEngineServer(mockSentry.Log, mockSentry.ChainConfig, executionRpc, mockSentry.HeaderDownload(), nil, false, true, false, true)
+ engineServer.Start(ctx, &httpcfg.HttpCfg{}, mockSentry.DB, mockSentry.BlockReader, ff, nil, mockSentry.Engine, eth, txPool, nil)
+
+ err = wrappedTxn.MarshalBinaryWrapped(buf)
+ require.NoError(err)
+ _, err = api.SendRawTransaction(ctx, buf.Bytes())
+ require.NoError(err)
+
+ blobsResp, err := engineServer.GetBlobsV1(ctx, wrappedTxn.Tx.BlobVersionedHashes)
+ require.NoError(err)
+ require.Equal(blobsResp[0].Blob, wrappedTxn.Blobs[0][:])
+ require.Equal(blobsResp[1].Blob, wrappedTxn.Blobs[1][:])
+ require.Equal(blobsResp[0].Proof, wrappedTxn.Proofs[0][:])
+ require.Equal(blobsResp[1].Proof, wrappedTxn.Proofs[1][:])
+}
diff --git a/turbo/engineapi/engine_types/jsonrpc.go b/turbo/engineapi/engine_types/jsonrpc.go
index b212ec9c59d..3872085139b 100644
--- a/turbo/engineapi/engine_types/jsonrpc.go
+++ b/turbo/engineapi/engine_types/jsonrpc.go
@@ -81,6 +81,12 @@ type BlobsBundleV1 struct {
Blobs []hexutil.Bytes `json:"blobs" gencodec:"required"`
}
+// BlobAndProofV1 holds one item for engine_getBlobsV1
+type BlobAndProofV1 struct {
+ Blob hexutil.Bytes `json:"blob" gencodec:"required"`
+ Proof hexutil.Bytes `json:"proof" gencodec:"required"`
+}
+
type ExecutionPayloadBody struct {
Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals" gencodec:"required"`
diff --git a/turbo/engineapi/interface.go b/turbo/engineapi/interface.go
index e336cbb6c96..8cfd3d85adc 100644
--- a/turbo/engineapi/interface.go
+++ b/turbo/engineapi/interface.go
@@ -21,6 +21,7 @@ import (
"github.com/erigontech/erigon-lib/common"
"github.com/erigontech/erigon-lib/common/hexutil"
+ "github.com/erigontech/erigon-lib/gointerfaces/txpoolproto"
"github.com/erigontech/erigon/turbo/engineapi/engine_types"
)
@@ -40,4 +41,5 @@ type EngineAPI interface {
GetPayloadBodiesByHashV1(ctx context.Context, hashes []common.Hash) ([]*engine_types.ExecutionPayloadBody, error)
GetPayloadBodiesByRangeV1(ctx context.Context, start, count hexutil.Uint64) ([]*engine_types.ExecutionPayloadBody, error)
GetClientVersionV1(ctx context.Context, callerVersion *engine_types.ClientVersionV1) ([]engine_types.ClientVersionV1, error)
+ GetBlobsV1(ctx context.Context, blobHashes []common.Hash) ([]*txpoolproto.BlobAndProofV1, error)
}
diff --git a/turbo/jsonrpc/debug_api.go b/turbo/jsonrpc/debug_api.go
index 9a8225b9dcb..d867bb659da 100644
--- a/turbo/jsonrpc/debug_api.go
+++ b/turbo/jsonrpc/debug_api.go
@@ -520,7 +520,7 @@ func (api *PrivateDebugAPIImpl) GetRawTransaction(ctx context.Context, txnHash c
if err != nil {
return nil, err
}
- txNum = txNumNextBlock - 1
+ txNum = txNumNextBlock
}
} else {
blockNum, ok, err = api._blockReader.EventLookup(ctx, tx, txnHash)
diff --git a/turbo/jsonrpc/eth_accounts.go b/turbo/jsonrpc/eth_accounts.go
index 1730d23b007..b4e6492c66b 100644
--- a/turbo/jsonrpc/eth_accounts.go
+++ b/turbo/jsonrpc/eth_accounts.go
@@ -116,15 +116,13 @@ func (api *APIImpl) GetCode(ctx context.Context, address libcommon.Address, bloc
// GetStorageAt implements eth_getStorageAt. Returns the value from a storage position at a given address.
func (api *APIImpl) GetStorageAt(ctx context.Context, address libcommon.Address, index string, blockNrOrHash rpc.BlockNumberOrHash) (string, error) {
var empty []byte
- indexBytes := hexutil.FromHex(index)
-
- if len(indexBytes) > 32 {
- return "", errors.New("unable to decode storage key: hex string too long, want at most 32 bytes")
+ if err := hexutil.IsValidQuantity(index); err != nil {
+ return "", errors.New("unable to decode storage key: " + err.Error())
}
- tx, err1 := api.db.BeginTemporalRo(ctx)
- if err1 != nil {
- return hexutil.Encode(libcommon.LeftPadBytes(empty, 32)), err1
+ tx, err := api.db.BeginTemporalRo(ctx)
+ if err != nil {
+ return hexutil.Encode(libcommon.LeftPadBytes(empty, 32)), err
}
defer tx.Rollback()
diff --git a/turbo/jsonrpc/eth_api_test.go b/turbo/jsonrpc/eth_api_test.go
index 69fbb4b180c..0aac7ea64b8 100644
--- a/turbo/jsonrpc/eth_api_test.go
+++ b/turbo/jsonrpc/eth_api_test.go
@@ -96,7 +96,7 @@ func TestGetStorageAt_ByBlockNumber_WithRequireCanonicalDefault(t *testing.T) {
api := NewEthAPI(newBaseApiForTest(m), m.DB, nil, nil, nil, 5000000, ethconfig.Defaults.RPCTxFeeCap, 100_000, false, 100_000, 128, log.New())
addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7")
- result, err := api.GetStorageAt(context.Background(), addr, common.HexToHash("0x0").String(), rpc.BlockNumberOrHashWithNumber(0))
+ result, err := api.GetStorageAt(context.Background(), addr, "0x0", rpc.BlockNumberOrHashWithNumber(0))
if err != nil {
t.Errorf("calling GetStorageAt: %v", err)
}
@@ -110,7 +110,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault(t *testing.T) {
api := NewEthAPI(newBaseApiForTest(m), m.DB, nil, nil, nil, 5000000, ethconfig.Defaults.RPCTxFeeCap, 100_000, false, 100_000, 128, log.New())
addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7")
- result, err := api.GetStorageAt(context.Background(), addr, common.HexToHash("0x0").String(), rpc.BlockNumberOrHashWithHash(m.Genesis.Hash(), false))
+ result, err := api.GetStorageAt(context.Background(), addr, "0x0", rpc.BlockNumberOrHashWithHash(m.Genesis.Hash(), false))
if err != nil {
t.Errorf("calling GetStorageAt: %v", err)
}
@@ -124,7 +124,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue(t *testing.T) {
api := NewEthAPI(newBaseApiForTest(m), m.DB, nil, nil, nil, 5000000, ethconfig.Defaults.RPCTxFeeCap, 100_000, false, 100_000, 128, log.New())
addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7")
- result, err := api.GetStorageAt(context.Background(), addr, common.HexToHash("0x0").String(), rpc.BlockNumberOrHashWithHash(m.Genesis.Hash(), true))
+ result, err := api.GetStorageAt(context.Background(), addr, "0x0", rpc.BlockNumberOrHashWithHash(m.Genesis.Hash(), true))
if err != nil {
t.Errorf("calling GetStorageAt: %v", err)
}
@@ -144,7 +144,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault_BlockNotFoundError
}
offChainBlock := offChain.Blocks[0]
- if _, err := api.GetStorageAt(context.Background(), addr, common.HexToHash("0x0").String(), rpc.BlockNumberOrHashWithHash(offChainBlock.Hash(), false)); err != nil {
+ if _, err := api.GetStorageAt(context.Background(), addr, "0x0", rpc.BlockNumberOrHashWithHash(offChainBlock.Hash(), false)); err != nil {
if fmt.Sprintf("%v", err) != fmt.Sprintf("block %s not found", offChainBlock.Hash().String()[2:]) {
t.Errorf("wrong error: %v", err)
}
@@ -165,7 +165,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue_BlockNotFoundError(t
}
offChainBlock := offChain.Blocks[0]
- if _, err := api.GetStorageAt(context.Background(), addr, common.HexToHash("0x0").String(), rpc.BlockNumberOrHashWithHash(offChainBlock.Hash(), true)); err != nil {
+ if _, err := api.GetStorageAt(context.Background(), addr, "0x0", rpc.BlockNumberOrHashWithHash(offChainBlock.Hash(), true)); err != nil {
if fmt.Sprintf("%v", err) != fmt.Sprintf("block %s not found", offChainBlock.Hash().String()[2:]) {
t.Errorf("wrong error: %v", err)
}
@@ -182,7 +182,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault_NonCanonicalBlock(
orphanedBlock := orphanedChain[0].Blocks[0]
- result, err := api.GetStorageAt(context.Background(), addr, common.HexToHash("0x0").String(), rpc.BlockNumberOrHashWithHash(orphanedBlock.Hash(), false))
+ result, err := api.GetStorageAt(context.Background(), addr, "0x0", rpc.BlockNumberOrHashWithHash(orphanedBlock.Hash(), false))
if err != nil {
if fmt.Sprintf("%v", err) != fmt.Sprintf("hash %s is not currently canonical", orphanedBlock.Hash().String()[2:]) {
t.Errorf("wrong error: %v", err)
@@ -201,7 +201,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue_NonCanonicalBlock(t *
orphanedBlock := orphanedChain[0].Blocks[0]
- if _, err := api.GetStorageAt(context.Background(), addr, common.HexToHash("0x0").String(), rpc.BlockNumberOrHashWithHash(orphanedBlock.Hash(), true)); err != nil {
+ if _, err := api.GetStorageAt(context.Background(), addr, "0x0", rpc.BlockNumberOrHashWithHash(orphanedBlock.Hash(), true)); err != nil {
if fmt.Sprintf("%v", err) != fmt.Sprintf("hash %s is not currently canonical", orphanedBlock.Hash().String()[2:]) {
t.Errorf("wrong error: %v", err)
}
diff --git a/turbo/jsonrpc/eth_receipts.go b/turbo/jsonrpc/eth_receipts.go
index adeb007f95b..a940e288d88 100644
--- a/turbo/jsonrpc/eth_receipts.go
+++ b/turbo/jsonrpc/eth_receipts.go
@@ -303,6 +303,10 @@ func (api *BaseAPI) getLogsV3(ctx context.Context, tx kv.TemporalTx, begin, end
return logs, err
}
+ if len(events) == 0 {
+ continue
+ }
+
borLogs, err := api.borReceiptGenerator.GenerateBorLogs(ctx, events, txNumsReader, tx, header, chainConfig, txIndex, len(logs))
if err != nil {
return logs, err
@@ -481,7 +485,7 @@ func (api *APIImpl) GetTransactionReceipt(ctx context.Context, txnHash common.Ha
if err != nil {
return nil, err
}
- txNum = txNumNextBlock - 1
+ txNum = txNumNextBlock
}
} else {
blockNum, ok, err = api._blockReader.EventLookup(ctx, tx, txnHash)
diff --git a/turbo/jsonrpc/eth_txs.go b/turbo/jsonrpc/eth_txs.go
index d2642d44783..3de286fc931 100644
--- a/turbo/jsonrpc/eth_txs.go
+++ b/turbo/jsonrpc/eth_txs.go
@@ -66,7 +66,7 @@ func (api *APIImpl) GetTransactionByHash(ctx context.Context, txnHash common.Has
if err != nil {
return nil, err
}
- txNum = txNumNextBlock - 1
+ txNum = txNumNextBlock
}
} else {
blockNum, ok, err = api._blockReader.EventLookup(ctx, tx, txnHash)
diff --git a/turbo/jsonrpc/trace_adhoc.go b/turbo/jsonrpc/trace_adhoc.go
index 4368d7c778d..bdcfc82e3e7 100644
--- a/turbo/jsonrpc/trace_adhoc.go
+++ b/turbo/jsonrpc/trace_adhoc.go
@@ -838,7 +838,7 @@ func (api *TraceAPIImpl) ReplayTransaction(ctx context.Context, txHash libcommon
if err != nil {
return nil, err
}
- txNum = txNumNextBlock - 1
+ txNum = txNumNextBlock
}
} else {
blockNum, ok, err = api._blockReader.EventLookup(ctx, tx, txHash)
diff --git a/turbo/jsonrpc/trace_filtering.go b/turbo/jsonrpc/trace_filtering.go
index 33aa1358007..e9022f58776 100644
--- a/turbo/jsonrpc/trace_filtering.go
+++ b/turbo/jsonrpc/trace_filtering.go
@@ -87,7 +87,7 @@ func (api *TraceAPIImpl) Transaction(ctx context.Context, txHash common.Hash, ga
if err != nil {
return nil, err
}
- txNum = txNumNextBlock - 1
+ txNum = txNumNextBlock
}
} else {
blockNumber, ok, err = api._blockReader.EventLookup(ctx, tx, txHash)
diff --git a/turbo/logging/logging.go b/turbo/logging/logging.go
index f2eb8b9f9f8..2796dcae457 100644
--- a/turbo/logging/logging.go
+++ b/turbo/logging/logging.go
@@ -61,12 +61,16 @@ func SetupLoggerCtx(filePrefix string, ctx *cli.Context,
metrics.DelayLoggingEnabled = ctx.Bool(LogBlockDelayFlag.Name)
- consoleLevel, lErr := tryGetLogLevel(ctx.String(LogConsoleVerbosityFlag.Name))
- if lErr != nil {
- // try verbosity flag
- consoleLevel, lErr = tryGetLogLevel(ctx.String(LogVerbosityFlag.Name))
- if lErr != nil {
- consoleLevel = consoleDefaultLevel
+ consoleLevel := consoleDefaultLevel
+
+ // Priority: LogConsoleVerbosityFlag (if explicitly set) > LogVerbosityFlag (if explicitly set) > default
+ if ctx.IsSet(LogConsoleVerbosityFlag.Name) {
+ if level, err := tryGetLogLevel(ctx.String(LogConsoleVerbosityFlag.Name)); err == nil {
+ consoleLevel = level
+ }
+ } else if ctx.IsSet(LogVerbosityFlag.Name) {
+ if level, err := tryGetLogLevel(ctx.String(LogVerbosityFlag.Name)); err == nil {
+ consoleLevel = level
}
}
diff --git a/turbo/snapshotsync/freezeblocks/block_reader_test.go b/turbo/snapshotsync/freezeblocks/block_reader_test.go
index 9616cf83aa4..4387f6fb367 100644
--- a/turbo/snapshotsync/freezeblocks/block_reader_test.go
+++ b/turbo/snapshotsync/freezeblocks/block_reader_test.go
@@ -27,10 +27,9 @@ import (
"github.com/stretchr/testify/require"
"github.com/erigontech/erigon-lib/chain/networkname"
- "github.com/erigontech/erigon-lib/log/v3"
-
"github.com/erigontech/erigon-lib/common/length"
"github.com/erigontech/erigon-lib/downloader/snaptype"
+ "github.com/erigontech/erigon-lib/log/v3"
"github.com/erigontech/erigon-lib/recsplit"
"github.com/erigontech/erigon-lib/seg"
coresnaptype "github.com/erigontech/erigon/core/snaptype"
diff --git a/turbo/snapshotsync/freezeblocks/block_snapshots.go b/turbo/snapshotsync/freezeblocks/block_snapshots.go
index 69394674cfd..64336f8487a 100644
--- a/turbo/snapshotsync/freezeblocks/block_snapshots.go
+++ b/turbo/snapshotsync/freezeblocks/block_snapshots.go
@@ -151,7 +151,7 @@ type BlockRetire struct {
// shared semaphore with AggregatorV3 to allow only one type of snapshot building at a time
snBuildAllowed *semaphore.Weighted
- workers int
+ workers atomic.Int32
tmpDir string
db kv.RoDB
@@ -181,8 +181,7 @@ func NewBlockRetire(
snBuildAllowed *semaphore.Weighted,
logger log.Logger,
) *BlockRetire {
- return &BlockRetire{
- workers: compressWorkers,
+ r := &BlockRetire{
tmpDir: dirs.Tmp,
dirs: dirs,
blockReader: blockReader,
@@ -196,10 +195,12 @@ func NewBlockRetire(
heimdallStore: heimdallStore,
bridgeStore: bridgeStore,
}
+ r.workers.Store(int32(compressWorkers))
+ return r
}
-func (br *BlockRetire) SetWorkers(workers int) { br.workers = workers }
-func (br *BlockRetire) GetWorkers() int { return br.workers }
+func (br *BlockRetire) SetWorkers(workers int) { br.workers.Store(int32(workers)) }
+func (br *BlockRetire) GetWorkers() int { return int(br.workers.Load()) }
func (br *BlockRetire) IO() (services.FullBlockReader, *blockio.BlockWriter) {
return br.blockReader, br.blockWriter
@@ -266,7 +267,7 @@ func (br *BlockRetire) retireBlocks(ctx context.Context, minBlockNum uint64, max
default:
}
- notifier, logger, blockReader, tmpDir, db, workers := br.notifier, br.logger, br.blockReader, br.tmpDir, br.db, br.workers
+ notifier, logger, blockReader, tmpDir, db, workers := br.notifier, br.logger, br.blockReader, br.tmpDir, br.db, br.workers.Load()
snapshots := br.snapshots()
blockFrom, blockTo, ok := CanRetire(maxBlockNum, minBlockNum, snaptype.Unknown, br.chainConfig)
@@ -280,7 +281,7 @@ func (br *BlockRetire) retireBlocks(ctx context.Context, minBlockNum uint64, max
logger.Log(lvl, "[snapshots] Retire Blocks", "range",
fmt.Sprintf("%s-%s", common2.PrettyCounter(blockFrom), common2.PrettyCounter(blockTo)))
// in future we will do it in background
- if err := DumpBlocks(ctx, blockFrom, blockTo, br.chainConfig, tmpDir, snapshots.Dir(), db, workers, lvl, logger, blockReader); err != nil {
+ if err := DumpBlocks(ctx, blockFrom, blockTo, br.chainConfig, tmpDir, snapshots.Dir(), db, int(workers), lvl, logger, blockReader); err != nil {
return ok, fmt.Errorf("DumpBlocks: %w", err)
}
@@ -293,7 +294,7 @@ func (br *BlockRetire) retireBlocks(ctx context.Context, minBlockNum uint64, max
}
}
- merger := snapshotsync.NewMerger(tmpDir, workers, lvl, db, br.chainConfig, logger)
+ merger := snapshotsync.NewMerger(tmpDir, int(workers), lvl, db, br.chainConfig, logger)
rangesToMerge := merger.FindMergeRanges(snapshots.Ranges(), snapshots.BlocksAvailable())
if len(rangesToMerge) == 0 {
return ok, nil
diff --git a/turbo/snapshotsync/freezeblocks/bor_snapshots.go b/turbo/snapshotsync/freezeblocks/bor_snapshots.go
index 35b2f4e73ed..2f96d058f95 100644
--- a/turbo/snapshotsync/freezeblocks/bor_snapshots.go
+++ b/turbo/snapshotsync/freezeblocks/bor_snapshots.go
@@ -44,9 +44,8 @@ func (br *BlockRetire) retireBorBlocks(ctx context.Context, minBlockNum uint64,
}
snapshots := br.borSnapshots()
-
chainConfig := fromdb.ChainConfig(br.db)
- notifier, logger, blockReader, tmpDir, db, workers := br.notifier, br.logger, br.blockReader, br.tmpDir, br.db, br.workers
+ notifier, logger, blockReader, tmpDir, db, workers := br.notifier, br.logger, br.blockReader, br.tmpDir, br.db, int(br.workers.Load())
blocksRetired := false
diff --git a/turbo/stages/mock/mock_sentry.go b/turbo/stages/mock/mock_sentry.go
index db6203bd52a..012b894a601 100644
--- a/turbo/stages/mock/mock_sentry.go
+++ b/turbo/stages/mock/mock_sentry.go
@@ -502,6 +502,7 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK
stagedsync.StageSendersCfg(mock.DB, mock.ChainConfig, cfg.Sync, false, dirs.Tmp, prune, mock.BlockReader, mock.sentriesClient.Hd),
stagedsync.StageMiningExecCfg(mock.DB, miner, nil, *mock.ChainConfig, mock.Engine, &vm.Config{}, dirs.Tmp, nil, 0, mock.TxPool, mock.BlockReader),
stagedsync.StageMiningFinishCfg(mock.DB, *mock.ChainConfig, mock.Engine, miner, miningCancel, mock.BlockReader, latestBlockBuiltStore),
+ false,
), stagedsync.MiningUnwindOrder, stagedsync.MiningPruneOrder,
logger, stages.ModeBlockProduction)
// We start the mining step
@@ -575,6 +576,7 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK
stagedsync.StageSendersCfg(mock.DB, mock.ChainConfig, cfg.Sync, false, dirs.Tmp, prune, mock.BlockReader, mock.sentriesClient.Hd),
stagedsync.StageMiningExecCfg(mock.DB, miner, nil, *mock.ChainConfig, mock.Engine, &vm.Config{}, dirs.Tmp, nil, 0, mock.TxPool, mock.BlockReader),
stagedsync.StageMiningFinishCfg(mock.DB, *mock.ChainConfig, mock.Engine, miner, miningCancel, mock.BlockReader, latestBlockBuiltStore),
+ false,
),
stagedsync.MiningUnwindOrder,
stagedsync.MiningPruneOrder,
@@ -637,6 +639,22 @@ func MockWithTxPool(t *testing.T) *MockSentry {
return MockWithEverything(t, gspec, key, prune.DefaultMode, ethash.NewFaker(), blockBufferSize, true, false, checkStateRoot)
}
+func MockWithTxPoolCancun(t *testing.T) *MockSentry {
+ funds := big.NewInt(1 * params.Ether)
+ key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ address := crypto.PubkeyToAddress(key.PublicKey)
+ chainConfig := params.AllProtocolChanges
+ gspec := &types.Genesis{
+ Config: chainConfig,
+ Alloc: types.GenesisAlloc{
+ address: {Balance: funds},
+ },
+ }
+
+ checkStateRoot := true
+ return MockWithEverything(t, gspec, key, prune.DefaultMode, ethash.NewFaker(), blockBufferSize, true, false, checkStateRoot)
+}
+
func MockWithZeroTTD(t *testing.T, withPosDownloader bool) *MockSentry {
funds := big.NewInt(1 * params.Ether)
key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go
index a956ebbbb13..b42a3777388 100644
--- a/turbo/stages/stageloop.go
+++ b/turbo/stages/stageloop.go
@@ -786,6 +786,7 @@ func NewPolygonSyncStages(
statusDataProvider *sentry.StatusDataProvider,
stopNode func() error,
engineAPISwitcher sync.EngineAPISwitcher,
+ minedBlockReg sync.MinedBlockObserverRegistrar,
) []*stagedsync.Stage {
return stagedsync.PolygonSyncStages(
ctx,
@@ -821,6 +822,7 @@ func NewPolygonSyncStages(
nil, /* userUnwindTypeOverrides */
notifications,
engineAPISwitcher,
+ minedBlockReg,
),
stagedsync.StageSendersCfg(db, chainConfig, config.Sync, false, config.Dirs.Tmp, config.Prune, blockReader, nil),
stagedsync.StageExecuteBlocksCfg(db, config.Prune, config.BatchSize, chainConfig, consensusEngine, &vm.Config{}, notifications, config.StateStream, false, config.Dirs, blockReader, nil, config.Genesis, config.Sync, SilkwormForExecutionStage(silkworm, config)),
diff --git a/txnprovider/txpool/pool.go b/txnprovider/txpool/pool.go
index 2ebfed127d4..62bf380c247 100644
--- a/txnprovider/txpool/pool.go
+++ b/txnprovider/txpool/pool.go
@@ -82,7 +82,7 @@ type Pool interface {
FilterKnownIdHashes(tx kv.Tx, hashes Hashes) (unknownHashes Hashes, err error)
Started() bool
GetRlp(tx kv.Tx, hash []byte) ([]byte, error)
-
+ GetBlobs(blobhashes []common.Hash) ([][]byte, [][]byte)
AddNewGoodPeer(peerID PeerID)
}
@@ -149,6 +149,10 @@ type TxPool struct {
builderNotifyNewTxns func()
logger log.Logger
auths map[common.Address]*metaTxn // All accounts with a pooled authorization
+ blobHashToTxn map[common.Hash]struct {
+ index int
+ txnHash common.Hash
+ }
}
type FeeCalculator interface {
@@ -226,6 +230,10 @@ func New(
newSlotsStreams: newSlotsStreams,
logger: logger,
auths: map[common.Address]*metaTxn{},
+ blobHashToTxn: map[common.Hash]struct {
+ index int
+ txnHash common.Hash
+ }{},
}
if shanghaiTime != nil {
@@ -1415,8 +1423,8 @@ func (p *TxPool) addLocked(mt *metaTxn, announcements *Announcements) txpoolcfg.
}
priceBump := p.cfg.PriceBump
- //Blob txn threshold checks for replace txn
if mt.TxnSlot.Type == BlobTxnType {
+ //Blob txn threshold checks for replace txn
priceBump = p.cfg.BlobPriceBump
blobFeeThreshold, overflow := (&uint256.Int{}).MulDivOverflow(
&found.TxnSlot.BlobFeeCap,
@@ -1429,6 +1437,7 @@ func (p *TxPool) addLocked(mt *metaTxn, announcements *Announcements) txpoolcfg.
}
return txpoolcfg.ReplaceUnderpriced // TODO: This is the same as NotReplaced
}
+
}
//Regular txn threshold checks
@@ -1520,6 +1529,12 @@ func (p *TxPool) addLocked(mt *metaTxn, announcements *Announcements) txpoolcfg.
if mt.TxnSlot.Type == BlobTxnType {
t := p.totalBlobsInPool.Load()
p.totalBlobsInPool.Store(t + (uint64(len(mt.TxnSlot.BlobHashes))))
+ for i, b := range mt.TxnSlot.BlobHashes {
+ p.blobHashToTxn[b] = struct {
+ index int
+ txnHash common.Hash
+ }{i, mt.TxnSlot.IDHash}
+ }
}
// Remove from mined cache as we are now "resurrecting" it to a sub-pool
@@ -1553,6 +1568,30 @@ func (p *TxPool) discardLocked(mt *metaTxn, reason txpoolcfg.DiscardReason) {
}
}
+func (p *TxPool) getBlobsAndProofByBlobHashLocked(blobHashes []common.Hash) ([][]byte, [][]byte) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+ blobs := make([][]byte, len(blobHashes))
+ proofs := make([][]byte, len(blobHashes))
+ for i, h := range blobHashes {
+ th, ok := p.blobHashToTxn[h]
+ if !ok {
+ continue
+ }
+ mt, ok := p.byHash[string(th.txnHash[:])]
+ if !ok || mt == nil {
+ continue
+ }
+ blobs[i] = mt.TxnSlot.Blobs[th.index]
+ proofs[i] = mt.TxnSlot.Proofs[th.index][:]
+ }
+ return blobs, proofs
+}
+
+func (p *TxPool) GetBlobs(blobHashes []common.Hash) ([][]byte, [][]byte) {
+ return p.getBlobsAndProofByBlobHashLocked(blobHashes)
+}
+
// Cache recently mined blobs in anticipation of reorg, delete finalized ones
func (p *TxPool) processMinedFinalizedBlobs(minedTxns []*TxnSlot, finalizedBlock uint64) error {
p.lastFinalizedBlock.Store(finalizedBlock)
diff --git a/txnprovider/txpool/pool_mock.go b/txnprovider/txpool/pool_mock.go
index 35e3136df48..97a398e6eb7 100644
--- a/txnprovider/txpool/pool_mock.go
+++ b/txnprovider/txpool/pool_mock.go
@@ -13,6 +13,7 @@ import (
context "context"
reflect "reflect"
+ common "github.com/erigontech/erigon-lib/common"
remoteproto "github.com/erigontech/erigon-lib/gointerfaces/remoteproto"
kv "github.com/erigontech/erigon-lib/kv"
txpoolcfg "github.com/erigontech/erigon/txnprovider/txpool/txpoolcfg"
@@ -193,6 +194,45 @@ func (c *MockPoolFilterKnownIdHashesCall) DoAndReturn(f func(kv.Tx, Hashes) (Has
return c
}
+// GetBlobs mocks base method.
+func (m *MockPool) GetBlobs(blobhashes []common.Hash) ([][]byte, [][]byte) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetBlobs", blobhashes)
+ ret0, _ := ret[0].([][]byte)
+ ret1, _ := ret[1].([][]byte)
+ return ret0, ret1
+}
+
+// GetBlobs indicates an expected call of GetBlobs.
+func (mr *MockPoolMockRecorder) GetBlobs(blobhashes any) *MockPoolGetBlobsCall {
+ mr.mock.ctrl.T.Helper()
+ call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlobs", reflect.TypeOf((*MockPool)(nil).GetBlobs), blobhashes)
+ return &MockPoolGetBlobsCall{Call: call}
+}
+
+// MockPoolGetBlobsCall wrap *gomock.Call
+type MockPoolGetBlobsCall struct {
+ *gomock.Call
+}
+
+// Return rewrite *gomock.Call.Return
+func (c *MockPoolGetBlobsCall) Return(arg0, arg1 [][]byte) *MockPoolGetBlobsCall {
+ c.Call = c.Call.Return(arg0, arg1)
+ return c
+}
+
+// Do rewrite *gomock.Call.Do
+func (c *MockPoolGetBlobsCall) Do(f func([]common.Hash) ([][]byte, [][]byte)) *MockPoolGetBlobsCall {
+ c.Call = c.Call.Do(f)
+ return c
+}
+
+// DoAndReturn rewrite *gomock.Call.DoAndReturn
+func (c *MockPoolGetBlobsCall) DoAndReturn(f func([]common.Hash) ([][]byte, [][]byte)) *MockPoolGetBlobsCall {
+ c.Call = c.Call.DoAndReturn(f)
+ return c
+}
+
// GetRlp mocks base method.
func (m *MockPool) GetRlp(tx kv.Tx, hash []byte) ([]byte, error) {
m.ctrl.T.Helper()
diff --git a/txnprovider/txpool/pool_test.go b/txnprovider/txpool/pool_test.go
index 456e2da0b4c..31f9b513eed 100644
--- a/txnprovider/txpool/pool_test.go
+++ b/txnprovider/txpool/pool_test.go
@@ -20,12 +20,13 @@ import (
"bytes"
"context"
"fmt"
- "github.com/erigontech/erigon-lib/state"
- accounts3 "github.com/erigontech/erigon-lib/types/accounts"
"math"
"math/big"
"testing"
+ "github.com/erigontech/erigon-lib/state"
+ accounts3 "github.com/erigontech/erigon-lib/types/accounts"
+
"github.com/holiman/uint256"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -46,7 +47,6 @@ import (
"github.com/erigontech/erigon-lib/log/v3"
"github.com/erigontech/erigon-lib/rlp"
"github.com/erigontech/erigon/core/types"
- "github.com/erigontech/erigon/core/types/typestest"
"github.com/erigontech/erigon/params"
"github.com/erigontech/erigon/txnprovider/txpool/txpoolcfg"
)
@@ -1252,7 +1252,7 @@ func TestBlobTxnReplacement(t *testing.T) {
// Todo, make the txn more realistic with good values
func makeBlobTxn() TxnSlot {
- wrapperRlp, commitments := typestest.MakeBlobTxnRlp()
+ wrapperRlp, commitments := types.MakeBlobTxnRlp()
commitment0 := commitments[0]
commitment1 := commitments[1]
tip, feeCap, blobFeeCap := uint256.NewInt(100_000), uint256.NewInt(200_000), uint256.NewInt(200_000)
@@ -1465,6 +1465,84 @@ func TestBlobSlots(t *testing.T) {
}
}
+func TestGetBlobsV1(t *testing.T) {
+ assert, require := assert.New(t), require.New(t)
+ ch := make(chan Announcements, 5)
+ coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
+ db := memdb.NewTestPoolDB(t)
+ cfg := txpoolcfg.DefaultConfig
+ ctx, cancel := context.WithCancel(context.Background())
+ t.Cleanup(cancel)
+ //Setting limits for blobs in the pool
+ cfg.TotalBlobPoolLimit = 20
+
+ sendersCache := kvcache.New(kvcache.DefaultCoherentConfig)
+ pool, err := New(ctx, ch, db, coreDB, cfg, sendersCache, *u256.N1, common.Big0, nil, common.Big0, nil, nil, nil, nil, func() {}, nil, log.New(), WithFeeCalculator(nil))
+ assert.NoError(err)
+ require.True(pool != nil)
+ pool.blockGasLimit.Store(30000000)
+ var stateVersionID uint64 = 0
+
+ h1 := gointerfaces.ConvertHashToH256([32]byte{})
+ change := &remote.StateChangeBatch{
+ StateVersionId: stateVersionID,
+ PendingBlockBaseFee: 200_000,
+ BlockGasLimit: math.MaxUint64,
+ PendingBlobFeePerGas: 100_000,
+ ChangeBatch: []*remote.StateChange{
+ {BlockHeight: 0, BlockHash: h1},
+ },
+ }
+ var addr [20]byte
+
+ // Add 1 eth to the user account, as a part of change
+ acc := accounts3.Account{
+ Nonce: 0,
+ Balance: *uint256.NewInt(1 * common.Ether),
+ CodeHash: common.Hash{},
+ Incarnation: 1,
+ }
+ v := accounts3.SerialiseV3(&acc)
+
+ for i := 0; i < 11; i++ {
+ addr[0] = uint8(i + 1)
+ change.ChangeBatch[0].Changes = append(change.ChangeBatch[0].Changes, &remote.AccountChange{
+ Action: remote.Action_UPSERT,
+ Address: gointerfaces.ConvertAddressToH160(addr),
+ Data: v,
+ })
+ }
+
+ tx, err := db.BeginRw(ctx)
+ require.NoError(err)
+ defer tx.Rollback()
+ err = pool.OnNewBlock(ctx, change, TxnSlots{}, TxnSlots{}, TxnSlots{})
+ assert.NoError(err)
+ blobHashes := make([]common.Hash, 0, 20)
+
+ //Adding 2 blobs with 1 txn
+ txnSlots := TxnSlots{}
+ addr[0] = uint8(1)
+ blobTxn := makeBlobTxn() // makes a txn with 2 blobs
+ blobTxn.IDHash[0] = uint8(3)
+ blobTxn.Nonce = 0
+ blobTxn.Gas = 50000
+ txnSlots.Append(&blobTxn, addr[:], true)
+ reasons, err := pool.AddLocalTxns(ctx, txnSlots)
+ assert.NoError(err)
+ for _, reason := range reasons {
+ assert.Equal(txpoolcfg.Success, reason, reason.String())
+ }
+ blobHashes = append(blobHashes, blobTxn.BlobHashes...)
+
+ blobs, proofs := pool.GetBlobs(blobHashes)
+ require.True(len(blobs) == len(blobHashes))
+ require.True(len(proofs) == len(blobHashes))
+ assert.Equal(blobTxn.Blobs, blobs)
+ assert.Equal(blobTxn.Proofs[0][:], proofs[0])
+ assert.Equal(blobTxn.Proofs[1][:], proofs[1])
+}
+
func TestGasLimitChanged(t *testing.T) {
assert, require := assert.New(t), require.New(t)
ch := make(chan Announcements, 100)
diff --git a/txnprovider/txpool/txpool_grpc_server.go b/txnprovider/txpool/txpool_grpc_server.go
index 72fdb977007..b95118f14fe 100644
--- a/txnprovider/txpool/txpool_grpc_server.go
+++ b/txnprovider/txpool/txpool_grpc_server.go
@@ -59,6 +59,7 @@ type txPool interface {
CountContent() (int, int, int)
IdHashKnown(tx kv.Tx, hash []byte) (bool, error)
NonceFromAddress(addr [20]byte) (nonce uint64, inPool bool)
+ GetBlobs(blobhashes []common.Hash) (blobs [][]byte, proofs [][]byte)
}
var _ txpool_proto.TxpoolServer = (*GrpcServer)(nil) // compile-time interface check
@@ -226,6 +227,26 @@ func (s *GrpcServer) Add(ctx context.Context, in *txpool_proto.AddRequest) (*txp
return reply, nil
}
+func (s *GrpcServer) GetBlobs(ctx context.Context, in *txpool_proto.GetBlobsRequest) (*txpool_proto.GetBlobsReply, error) {
+ tx, err := s.db.BeginRo(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer tx.Rollback()
+
+ hashes := make([]common.Hash, len(in.BlobHashes))
+ for i := range in.BlobHashes {
+ hashes[i] = gointerfaces.ConvertH256ToHash(in.BlobHashes[i])
+ }
+ blobs, proofs := s.txPool.GetBlobs(hashes)
+ reply := &txpool_proto.GetBlobsReply{BlobsAndProofs: make([]*txpool_proto.BlobAndProofV1, len(blobs))}
+
+ for i := range blobs {
+ reply.BlobsAndProofs[i] = &txpool_proto.BlobAndProofV1{Blob: blobs[i], Proof: proofs[i]}
+ }
+ return reply, nil
+}
+
func mapDiscardReasonToProto(reason txpoolcfg.DiscardReason) txpool_proto.ImportResult {
switch reason {
case txpoolcfg.Success:
diff --git a/txnprovider/txpool/txpoolcfg/txpoolcfg.go b/txnprovider/txpool/txpoolcfg/txpoolcfg.go
index 5e3894b91bf..a74134ceb93 100644
--- a/txnprovider/txpool/txpoolcfg/txpoolcfg.go
+++ b/txnprovider/txpool/txpoolcfg/txpoolcfg.go
@@ -182,6 +182,16 @@ func (r DiscardReason) String() string {
return "blobs limit in txpool is full"
case NoAuthorizations:
return "EIP-7702 transactions with an empty authorization list are invalid"
+ case GasLimitTooHigh:
+ return "gas limit is too high"
+ case BlobHashCheckFail:
+ return "KZGcommitment's versioned hash has to be equal to blob_versioned_hash at the same index"
+ case UnmatchedBlobTxExt:
+ return "KZGcommitments must match the corresponding blobs and proofs"
+ case UnequalBlobTxExt:
+ return "blob_versioned_hashes, blobs, commitments and proofs must have equal number"
+ case ErrAuthorityReserved:
+ return "EIP-7702 transaction with authority already reserved"
default:
panic(fmt.Sprintf("discard reason: %d", r))
}