core, eth, accounts, trie: use slices.Concat
hteevoli authored Oct 29, 2024
1 parent bce420b commit 4397c71
Showing 6 changed files with 17 additions and 12 deletions.
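slices.Concat entered the standard library in Go 1.22: it copies each of its arguments, in order, into a single freshly allocated slice, which makes it a drop-in replacement for the nested copy-and-append idiom this commit removes. A minimal sketch of the equivalence (names are illustrative only):

package main

import (
	"fmt"
	"slices"
)

func main() {
	a := []uint64{1, 2}
	b := []uint64{3}

	// Old idiom: copy a into a fresh slice, then append b onto it.
	oldStyle := append(append([]uint64{}, a...), b...)

	// New helper: one call, same result, never aliases its inputs.
	newStyle := slices.Concat(a, b)

	fmt.Println(oldStyle, newStyle) // [1 2 3] [1 2 3]
}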
3 changes: 2 additions & 1 deletion accounts/manager.go
@@ -18,6 +18,7 @@ package accounts

import (
"reflect"
"slices"
"sort"
"sync"

@@ -255,7 +256,7 @@ func merge(slice []Wallet, wallets ...Wallet) []Wallet {
slice = append(slice, wallet)
continue
}
-slice = append(slice[:n], append([]Wallet{wallet}, slice[n:]...)...)
+slice = slices.Concat(slice[:n], []Wallet{wallet}, slice[n:])
}
return slice
}
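The merge change above is an instance of a recurring pattern in this commit: insert an element into a sorted slice at the position found by sort.Search, splicing with slices.Concat instead of nested appends. A self-contained sketch of the pattern (insertSorted is a hypothetical helper, not part of this commit):

package main

import (
	"cmp"
	"fmt"
	"slices"
	"sort"
)

// insertSorted splices v into the sorted slice s, keeping it sorted.
func insertSorted[T cmp.Ordered](s []T, v T) []T {
	i := sort.Search(len(s), func(i int) bool { return s[i] >= v })
	return slices.Concat(s[:i], []T{v}, s[i:])
}

func main() {
	ws := []string{"alpha", "gamma"}
	fmt.Println(insertSorted(ws, "beta")) // [alpha beta gamma]
}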
5 changes: 3 additions & 2 deletions core/bloombits/matcher.go
@@ -21,6 +21,7 @@ import (
"context"
"errors"
"math"
"slices"
"sort"
"sync"
"sync/atomic"
@@ -417,7 +418,7 @@ func (m *Matcher) distributor(dist chan *request, session *MatcherSession) {
// New retrieval request arrived to be distributed to some fetcher process
queue := requests[req.bit]
index := sort.Search(len(queue), func(i int) bool { return queue[i] >= req.section })
-requests[req.bit] = append(queue[:index], append([]uint64{req.section}, queue[index:]...)...)
+requests[req.bit] = slices.Concat(queue[:index], []uint64{req.section}, queue[index:])

// If it's a new bit and we have waiting fetchers, allocate to them
if len(queue) == 0 {
@@ -485,7 +486,7 @@ func (m *Matcher) distributor(dist chan *request, session *MatcherSession) {
queue := requests[result.Bit]
for _, section := range missing {
index := sort.Search(len(queue), func(i int) bool { return queue[i] >= section })
-queue = append(queue[:index], append([]uint64{section}, queue[index:]...)...)
+queue = slices.Concat(queue[:index], []uint64{section}, queue[index:])
}
requests[result.Bit] = queue
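The two queue insertions above behave slightly differently after this change: the old nested-append form can write through the first argument's spare capacity, mutating the shared backing array, whereas slices.Concat always copies into fresh memory. At these call sites the result was immediately reassigned, so the mutation was benign, but Concat rules the hazard out. A small sketch of the difference (standard library only):

package main

import (
	"fmt"
	"slices"
)

func main() {
	base := make([]int, 3, 8) // spare capacity: appends can write in place
	copy(base, []int{10, 20, 30})

	// Old idiom: the outer append reuses base's backing array, so base is
	// clobbered past the split point (the result itself is still correct,
	// because the inner append copied the tail first).
	_ = append(base[:1], append([]int{99}, base[1:]...)...)
	fmt.Println(base) // [10 99 20]

	copy(base, []int{10, 20, 30})

	// slices.Concat never aliases its inputs; base stays intact.
	out := slices.Concat(base[:1], []int{99}, base[1:])
	fmt.Println(out, base) // [10 99 20 30] [10 20 30]
}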

2 changes: 1 addition & 1 deletion core/forkid/forkid.go
@@ -135,7 +135,7 @@ func newFilter(config *params.ChainConfig, genesis *types.Block, headfn func() (
// Calculate all the valid fork hash and fork next combos
var (
forksByBlock, forksByTime = gatherForks(config, genesis.Time())
-forks = append(append([]uint64{}, forksByBlock...), forksByTime...)
+forks = slices.Concat(forksByBlock, forksByTime)
sums = make([][4]byte, len(forks)+1) // 0th is the genesis
)
hash := crc32.ChecksumIEEE(genesis.Hash().Bytes())
11 changes: 6 additions & 5 deletions core/rawdb/schema.go
@@ -20,6 +20,7 @@ package rawdb
import (
"bytes"
"encoding/binary"
"slices"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
@@ -171,7 +172,7 @@ func headerKeyPrefix(number uint64) []byte {

// headerKey = headerPrefix + num (uint64 big endian) + hash
func headerKey(number uint64, hash common.Hash) []byte {
-return append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
+return slices.Concat(headerPrefix, encodeBlockNumber(number), hash.Bytes())
}

// headerTDKey = headerPrefix + num (uint64 big endian) + hash + headerTDSuffix
@@ -181,7 +182,7 @@ func headerTDKey(number uint64, hash common.Hash) []byte {

// headerHashKey = headerPrefix + num (uint64 big endian) + headerHashSuffix
func headerHashKey(number uint64) []byte {
-return append(append(headerPrefix, encodeBlockNumber(number)...), headerHashSuffix...)
+return slices.Concat(headerPrefix, encodeBlockNumber(number), headerHashSuffix)
}

// headerNumberKey = headerNumberPrefix + hash
@@ -191,12 +192,12 @@ func headerNumberKey(hash common.Hash) []byte {

// blockBodyKey = blockBodyPrefix + num (uint64 big endian) + hash
func blockBodyKey(number uint64, hash common.Hash) []byte {
-return append(append(blockBodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
+return slices.Concat(blockBodyPrefix, encodeBlockNumber(number), hash.Bytes())
}

// blockReceiptsKey = blockReceiptsPrefix + num (uint64 big endian) + hash
func blockReceiptsKey(number uint64, hash common.Hash) []byte {
-return append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
+return slices.Concat(blockReceiptsPrefix, encodeBlockNumber(number), hash.Bytes())
}

// txLookupKey = txLookupPrefix + hash
@@ -225,7 +226,7 @@ func storageSnapshotsKey(accountHash common.Hash) []byte {

// bloomBitsKey = bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash
func bloomBitsKey(bit uint, section uint64, hash common.Hash) []byte {
-key := append(append(bloomBitsPrefix, make([]byte, 10)...), hash.Bytes()...)
+key := slices.Concat(bloomBitsPrefix, make([]byte, 10), hash.Bytes())

binary.BigEndian.PutUint16(key[1:], uint16(bit))
binary.BigEndian.PutUint64(key[3:], section)
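The schema helpers in this file all build keys with the same fixed layout: a short table prefix, a big-endian encoded number (fixed width, so keys sort correctly byte-wise), then a hash. And because slices.Concat returns freshly allocated memory, bloomBitsKey can safely overwrite the bit and section fields in place afterwards, and the package-level prefix slices can never be mutated through leftover capacity. A minimal sketch of the layout (the prefix byte and field widths here are illustrative, not the real geth schema):

package main

import (
	"encoding/binary"
	"fmt"
	"slices"
)

var demoPrefix = []byte("h") // hypothetical table prefix

// demoKey mirrors the prefix + num (uint64 big endian) + hash layout.
func demoKey(number uint64, hash []byte) []byte {
	num := make([]byte, 8)
	binary.BigEndian.PutUint64(num, number)
	return slices.Concat(demoPrefix, num, hash)
}

func main() {
	fmt.Printf("%x\n", demoKey(42, []byte{0xde, 0xad, 0xbe, 0xef}))
	// Output: 68000000000000002adeadbeef
}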
3 changes: 2 additions & 1 deletion eth/downloader/downloader.go
@@ -21,6 +21,7 @@ import (
"errors"
"fmt"
"math/big"
"slices"
"sync"
"sync/atomic"
"time"
@@ -872,7 +873,7 @@ func (d *Downloader) processSnapSyncContent() error {
}
}
} else { // results already piled up, consume before handling pivot move
-results = append(append([]*fetchResult{oldPivot}, oldTail...), results...)
+results = slices.Concat([]*fetchResult{oldPivot}, oldTail, results)
}
// Split around the pivot block and process the two sides via snap/full sync
if !d.committed.Load() {
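Since slices.Concat is variadic, a single element can be spliced onto the front by wrapping it in a one-element slice literal, which is what the downloader hunk above does with oldPivot. A tiny sketch with a stand-in type (result here is hypothetical, playing the role of *fetchResult):

package main

import (
	"fmt"
	"slices"
)

type result struct{ id int } // hypothetical stand-in for *fetchResult

func main() {
	oldPivot := &result{0}
	oldTail := []*result{{1}, {2}}
	results := []*result{{3}, {4}}

	// Pivot first, then the previously buffered tail, then new results.
	merged := slices.Concat([]*result{oldPivot}, oldTail, results)
	for _, r := range merged {
		fmt.Print(r.id, " ")
	}
	fmt.Println() // 0 1 2 3 4
}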
5 changes: 3 additions & 2 deletions trie/sync.go
@@ -19,6 +19,7 @@ package trie
import (
"errors"
"fmt"
"slices"
"sync"

"github.com/ethereum/go-ethereum/common"
@@ -553,7 +554,7 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) {
}
children = []childNode{{
node: node.Val,
-path: append(append([]byte(nil), req.path...), key...),
+path: slices.Concat(req.path, key),
}}
// Mark all internal nodes between shortNode and its **in disk**
// child as invalid. This is essential in the case of path mode
@@ -595,7 +596,7 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) {
if node.Children[i] != nil {
children = append(children, childNode{
node: node.Children[i],
-path: append(append([]byte(nil), req.path...), byte(i)),
+path: slices.Concat(req.path, []byte{i}),
})
}
}
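Both trie hunks previously spelled the defensive copy out by hand: append([]byte(nil), req.path...) before appending the key or child index. slices.Concat keeps that guarantee, since its result never shares memory with req.path, so every child still receives its own independent path slice. A short sketch (path contents are made up):

package main

import (
	"fmt"
	"slices"
)

func main() {
	parent := []byte{0x1, 0x2}

	// Each child path is a fresh slice: mutating one cannot corrupt the
	// parent path or a sibling's path.
	left := slices.Concat(parent, []byte{0x3})
	right := slices.Concat(parent, []byte{0x4})
	left[0] = 0xff

	fmt.Println(parent, left, right) // [1 2] [255 2 3] [1 2 4]
}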
