Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Progpow integration #17731

Closed
wants to merge 6 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions cmd/puppeth/genesis.go
Original file line number Diff line number Diff line change
Expand Up @@ -239,6 +239,7 @@ type parityChainSpec struct {
DifficultyBombDelays map[string]string `json:"difficultyBombDelays"`
HomesteadTransition hexutil.Uint64 `json:"homesteadTransition"`
EIP100bTransition hexutil.Uint64 `json:"eip100bTransition"`
ProgpowTransition *hexutil.Uint64 `json:"progpowTransition,omitempty"`
} `json:"params"`
} `json:"Ethash"`
} `json:"engine"`
Expand Down Expand Up @@ -412,6 +413,11 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
if num := genesis.Config.IstanbulBlock; num != nil {
spec.setIstanbul(num)
}
// Progpow
if num := genesis.Config.ProgpowBlock; num != nil {
hexnum := hexutil.Uint64(num.Uint64())
spec.Engine.Ethash.Params.ProgpowTransition = &hexnum
}
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
spec.Params.GasLimitBoundDivisor = (math2.HexOrDecimal64)(params.GasLimitBoundDivisor)
Expand Down
1 change: 1 addition & 0 deletions cmd/puppeth/module_dashboard.go
Original file line number Diff line number Diff line change
Expand Up @@ -633,6 +633,7 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da
"Byzantium": conf.Genesis.Config.ByzantiumBlock,
"Constantinople": conf.Genesis.Config.ConstantinopleBlock,
"ConstantinopleFix": conf.Genesis.Config.PetersburgBlock,
"ProgPoW": conf.Genesis.Config.ProgpowBlock,
})
files[filepath.Join(workdir, "index.html")] = indexfile.Bytes()

Expand Down
6 changes: 6 additions & 0 deletions cmd/puppeth/wizard_genesis.go
Original file line number Diff line number Diff line change
Expand Up @@ -239,6 +239,12 @@ func (w *wizard) manageGenesis() {
fmt.Printf("Which block should YOLOv1 come into effect? (default = %v)\n", w.conf.Genesis.Config.YoloV1Block)
w.conf.Genesis.Config.YoloV1Block = w.readDefaultBigInt(w.conf.Genesis.Config.YoloV1Block)

if w.conf.Genesis.Config.Clique == nil {
fmt.Println()
fmt.Printf("Which block should ProgPow come into effect? (default = %v)\n", w.conf.Genesis.Config.ProgpowBlock)
w.conf.Genesis.Config.ProgpowBlock = w.readDefaultBigInt(w.conf.Genesis.Config.ProgpowBlock)
}

holiman marked this conversation as resolved.
Show resolved Hide resolved
out, _ := json.MarshalIndent(w.conf.Genesis.Config, "", " ")
fmt.Printf("Chain configuration updated:\n\n%s\n", out)

Expand Down
1 change: 1 addition & 0 deletions cmd/utils/flags.go
Original file line number Diff line number Diff line change
Expand Up @@ -1822,6 +1822,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readOnly bool) (chain *core.B
DatasetsInMem: eth.DefaultConfig.Ethash.DatasetsInMem,
DatasetsOnDisk: eth.DefaultConfig.Ethash.DatasetsOnDisk,
DatasetsLockMmap: eth.DefaultConfig.Ethash.DatasetsLockMmap,
ProgpowBlock: config.ProgpowBlock,
}, nil, false)
}
}
Expand Down
21 changes: 21 additions & 0 deletions consensus/ethash/algorithm.go
Original file line number Diff line number Diff line change
Expand Up @@ -207,6 +207,27 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
}
}

// generateCDag generates the cDag used for progpow. If the 'cDag' is nil, this method is a no-op. Otherwise
// it expects the cDag to be of size progpowCacheWords
func generateCDag(cDag, cache []uint32, epoch uint64) {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Where is the CDag specified?

if cDag == nil {
return
}
start := time.Now()
keccak512 := makeHasher(sha3.NewLegacyKeccak512())

for i := uint32(0); i < progpowCacheWords/16; i++ {
rawData := generateDatasetItem(cache, i, keccak512)
// 64 bytes in rawData -> 16 uint32
for j := uint32(0); j < 16; j++ {
cDag[i*16+j] = binary.LittleEndian.Uint32(rawData[4*j:])
}
}

elapsed := time.Since(start)
log.Info("Generated progpow cDag", "elapsed", common.PrettyDuration(elapsed), "epoch", epoch)
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
log.Info("Generated progpow cDag", "elapsed", common.PrettyDuration(elapsed), "epoch", epoch)
log.Debug("Generated progpow cDag", "elapsed", common.PrettyDuration(elapsed), "epoch", epoch)

}

// swap changes the byte order of the buffer assuming a uint32 representation.
func swap(buffer []byte) {
for i := 0; i < len(buffer); i += 4 {
Expand Down
41 changes: 36 additions & 5 deletions consensus/ethash/algorithm_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -729,7 +729,7 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) {

go func(idx int) {
defer pend.Done()
ethash := New(Config{cachedir, 0, 1, false, "", 0, 0, false, ModeNormal, nil}, nil, false)
ethash := New(Config{cachedir, 0, 1, false, "", 0, 0, false, ModeNormal, nil, nil}, nil, false)
defer ethash.Close()
if err := ethash.VerifySeal(nil, block.Header()); err != nil {
t.Errorf("proc %d: block verification failed: %v", idx, err)
Expand All @@ -739,15 +739,15 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) {
pend.Wait()
}

// Benchmarks the cache generation performance.
// BenchmarkCacheGeneration benchmarks the cache generation performance.
func BenchmarkCacheGeneration(b *testing.B) {
	for n := 0; n < b.N; n++ {
		// Allocate a fresh cache buffer each round so generation cost,
		// including the allocation, is what gets measured.
		buf := make([]uint32, cacheSize(1)/4)
		generateCache(buf, 0, make([]byte, 32))
	}
}

// Benchmarks the dataset (small) generation performance.
// BenchmarkSmallDatasetGeneration benchmarks the dataset (small) generation performance.
func BenchmarkSmallDatasetGeneration(b *testing.B) {
cache := make([]uint32, 65536/4)
generateCache(cache, 0, make([]byte, 32))
Expand All @@ -759,7 +759,7 @@ func BenchmarkSmallDatasetGeneration(b *testing.B) {
}
}

// Benchmarks the light verification performance.
// BenchmarkHashimotoLight benchmarks the light verification performance.
func BenchmarkHashimotoLight(b *testing.B) {
cache := make([]uint32, cacheSize(1)/4)
generateCache(cache, 0, make([]byte, 32))
Expand All @@ -772,7 +772,22 @@ func BenchmarkHashimotoLight(b *testing.B) {
}
}

// Benchmarks the full (small) verification performance.
// BenchmarkProgpowLight benchmarks the light verification performance (not counting cDag generation).
func BenchmarkProgpowLight(b *testing.B) {
	lightCache := make([]uint32, cacheSize(1)/4)
	generateCache(lightCache, 0, make([]byte, 32))

	headerHash := hexutil.MustDecode("0xc9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f")

	// Build the cDag up front so only the light verification itself is timed.
	cDag := make([]uint32, progpowCacheWords)
	generateCDag(cDag, lightCache, 0)

	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		progpowLight(datasetSize(1), lightCache, headerHash, 0, 0, cDag)
	}
}

// BenchmarkHashimotoFullSmall benchmarks the full (small) verification performance.
func BenchmarkHashimotoFullSmall(b *testing.B) {
cache := make([]uint32, 65536/4)
generateCache(cache, 0, make([]byte, 32))
Expand Down Expand Up @@ -812,3 +827,19 @@ func BenchmarkHashimotoFullMmap(b *testing.B) {
benchmarkHashimotoFullMmap(b, "WithLock", true)
benchmarkHashimotoFullMmap(b, "WithoutLock", false)
}

// BenchmarkProgpowFullSmall benchmarks the full (small) verification performance.
func BenchmarkProgpowFullSmall(b *testing.B) {
	// Generate a small cache and its derived dataset outside the timed region.
	smallCache := make([]uint32, 65536/4)
	generateCache(smallCache, 0, make([]byte, 32))

	smallDataset := make([]uint32, 32*65536/4)
	generateDataset(smallDataset, 0, smallCache)

	headerHash := hexutil.MustDecode("0xc9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f")

	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		progpowFull(smallDataset, headerHash, 0, 0)
	}
}
10 changes: 6 additions & 4 deletions consensus/ethash/consensus.go
Original file line number Diff line number Diff line change
Expand Up @@ -515,14 +515,16 @@ func (ethash *Ethash) verifySeal(chain consensus.ChainHeaderReader, header *type
number := header.Number.Uint64()

var (
digest []byte
result []byte
digest []byte
result []byte
powLight = ethash.lightPow(header.Number)
powFull = ethash.fullPow(header.Number)
)
// If fast-but-heavy PoW verification was requested, use an ethash dataset
if fulldag {
dataset := ethash.dataset(number, true)
if dataset.generated() {
digest, result = hashimotoFull(dataset.dataset, ethash.SealHash(header).Bytes(), header.Nonce.Uint64())
digest, result = powFull(dataset.dataset, ethash.SealHash(header).Bytes(), header.Nonce.Uint64(), number)

// Datasets are unmapped in a finalizer. Ensure that the dataset stays alive
// until after the call to hashimotoFull so it's not unmapped while being used.
Expand All @@ -540,7 +542,7 @@ func (ethash *Ethash) verifySeal(chain consensus.ChainHeaderReader, header *type
if ethash.config.PowMode == ModeTest {
size = 32 * 1024
}
digest, result = hashimotoLight(size, cache.cache, ethash.SealHash(header).Bytes(), header.Nonce.Uint64())
digest, result = powLight(size, cache.cache, ethash.SealHash(header).Bytes(), header.Nonce.Uint64(), number)

// Caches are unmapped in a finalizer. Ensure that the cache stays alive
// until after the call to hashimotoLight so it's not unmapped while being used.
Expand Down
78 changes: 78 additions & 0 deletions consensus/ethash/consensus_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -84,3 +84,81 @@ func TestCalcDifficulty(t *testing.T) {
}
}
}

//func TestTransitionToProgpow(t *testing.T) {
// fn := filepath.Join("..", "..", "tests", "hashi_to_pp_at_5.rlp.gz")
// fh, err := os.Open(fn)
// if err != nil {
// t.Skip(err)
// }
// defer fh.Close()
//
// var reader io.Reader = fh
// if strings.HasSuffix(fn, ".gz") {
// if reader, err = gzip.NewReader(reader); err != nil {
// t.Skip(err)
// }
// }
// stream := rlp.NewStream(reader, 0)
// config := &params.ChainConfig{
// HomesteadBlock: big.NewInt(1),
// EIP150Block: big.NewInt(2),
// EIP155Block: big.NewInt(3),
// EIP158Block: big.NewInt(3),
// ProgpowBlock: big.NewInt(5),
// }
// genesis := core.Genesis{Config: config,
// GasLimit: 0x47b760,
// Alloc: core.GenesisAlloc{},
// Timestamp: 0x59a4e76d,
// ExtraData: hexutil.MustDecode("0x0000000000000000000000000000000000000000000000000000000000000000"),
// }
// db := ethdb.NewMemDatabase()
// genesis.MustCommit(db)
//
// engine := New(Config{
// CacheDir: "",
// CachesInMem: 1,
// CachesOnDisk: 1,
// DatasetDir: "",
// DatasetsInMem: 1,
// DatasetsOnDisk: 1,
// ProgpowBlock: config.ProgpowBlock,
// }, nil, false)
// bc, err := core.NewBlockChain(db, nil, config, engine, vm.Config{}, nil)
// //fmt.Printf("Genesis hash %x\n", bc.Genesis().Hash())
// if err != nil {
// t.Skip(err)
// }
// blocks := make(types.Blocks, 100)
// n := 0
// for batch := 0; ; batch++ {
// // Load a batch of RLP blocks.
// i := 0
// for ; i < 100; i++ {
// var b types.Block
// if err := stream.Decode(&b); err == io.EOF {
// break
// } else if err != nil {
// t.Errorf("at block %d: %v", n, err)
// }
// // don't import first block
// if b.NumberU64() == 0 {
// i--
// continue
// }
// blocks[i] = &b
// n++
// }
// if i == 0 {
// break
// }
// if _, err := bc.InsertChain(blocks[:i]); err != nil {
// t.Fatalf("invalid block %d: %v", n, err)
// }
// }
// if bc.CurrentBlock().Number().Cmp(big.NewInt(1054)) != 0 {
// t.Errorf("Expected to import 1054 blocks, got %v", bc.CurrentBlock().Number())
//
// }
//}
Loading