Merge pull request #8959 from filecoin-project/feat/post-check-config
sealer: Config for disabling builtin PoSt / PoSt pre-checks
arajasek authored Jul 4, 2022
2 parents 2e7a2b1 + b35a325 commit 75d78de
Showing 16 changed files with 899 additions and 290 deletions.
10 changes: 10 additions & 0 deletions .circleci/config.yml
@@ -958,6 +958,11 @@ workflows:
suite: itest-verifreg
target: "./itests/verifreg_test.go"

- test:
name: test-itest-wdpost_check_config
suite: itest-wdpost_check_config
target: "./itests/wdpost_check_config_test.go"

- test:
name: test-itest-wdpost_dispute
suite: itest-wdpost_dispute
@@ -968,6 +973,11 @@
suite: itest-wdpost
target: "./itests/wdpost_test.go"

- test:
name: test-itest-wdpost_worker_config
suite: itest-wdpost_worker_config
target: "./itests/wdpost_worker_config_test.go"

- test:
name: test-itest-worker
suite: itest-worker
3 changes: 3 additions & 0 deletions documentation/en/default-lotus-config.toml
@@ -15,6 +15,9 @@


[Backup]
# When set to true, disables the metadata log (.lotus/kvlog). This can save disk
# space by reducing metadata redundancy.
#
# Note that in case of metadata corruption it might be much harder to recover
# your node if the metadata log is disabled.
#
60 changes: 60 additions & 0 deletions documentation/en/default-lotus-miner-config.toml
@@ -15,6 +15,9 @@


[Backup]
# When set to true, disables the metadata log (.lotus/kvlog). This can save disk
# space by reducing metadata redundancy.
#
# Note that in case of metadata corruption it might be much harder to recover
# your node if the metadata log is disabled.
#
@@ -310,11 +313,68 @@

[Proving]
# Maximum number of sector checks to run in parallel. (0 = unlimited)
#
# WARNING: Setting this value too high may crash the node by running out of stack.
# WARNING: Setting this value too low may make sector challenge reading much slower, resulting in failed PoSt due
# to late submission.
#
# After changing this option, confirm that the new value works in your setup by invoking
# 'lotus-miner proving compute window-post 0'
#
# type: int
# env var: LOTUS_PROVING_PARALLELCHECKLIMIT
#ParallelCheckLimit = 128
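
ParallelCheckLimit bounds how many of these sector checks run concurrently. As an illustration only - a sketch, not lotus's actual implementation, and assuming the sync and go-state-types/abi imports - such a bound is commonly enforced with a channel used as a semaphore; check below is a hypothetical stand-in for the per-sector readability check:

// Sketch only (not lotus code): cap concurrent sector checks with a channel
// semaphore; limit <= 0 is treated as unlimited, matching the docs above.
func parallelCheck(sectors []abi.SectorNumber, limit int, check func(abi.SectorNumber)) {
	if limit <= 0 {
		limit = len(sectors)
	}
	sem := make(chan struct{}, limit)
	var wg sync.WaitGroup
	for _, num := range sectors {
		wg.Add(1)
		go func(num abi.SectorNumber) {
			defer wg.Done()
			sem <- struct{}{}        // acquire a slot
			defer func() { <-sem }() // release the slot
			check(num)
		}(num)
	}
	wg.Wait()
}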

# Disable Window PoSt computation on the lotus-miner process even if no window PoSt workers are present.
#
# WARNING: If no window PoSt workers are connected, window PoSt WILL FAIL, resulting in faulty sectors which will
# need to be recovered. Before enabling this option, make sure your PoSt workers work correctly.
#
# After changing this option, confirm that the new value works in your setup by invoking
# 'lotus-miner proving compute window-post 0'
#
# type: bool
# env var: LOTUS_PROVING_DISABLEBUILTINWINDOWPOST
#DisableBuiltinWindowPoSt = false

# Disable Winning PoSt computation on the lotus-miner process even if no winning PoSt workers are present.
#
# WARNING: If no winning PoSt workers are connected, winning PoSt WILL FAIL, resulting in lost block rewards.
# Before enabling this option, make sure your PoSt workers work correctly.
#
# type: bool
# env var: LOTUS_PROVING_DISABLEBUILTINWINNINGPOST
#DisableBuiltinWinningPoSt = false

# Disable WindowPoSt provable sector readability checks.
#
# In normal operation, when preparing to compute WindowPoSt, lotus-miner will perform a round of reading challenges
# from all sectors to confirm that those sectors can be proven. Challenges read in this process are discarded, as
# we're only interested in checking that sector data can be read.
#
# When using builtin proof computation (no PoSt workers, and DisableBuiltinWindowPoSt set to false), this pre-check
# can save a lot of time and compute resources when some sectors turn out not to be readable - this is because the
# builtin logic does not skip snark computation when some sectors need to be skipped.
#
# When using PoSt workers this process is mostly redundant: the workers read challenges once anyway, and if
# challenges for some sectors aren't readable, those sectors simply get skipped.
#
# Disabling sector pre-checks will slightly reduce IO load when proving sectors, possibly resulting in shorter
# time to produce window PoSt. In setups with good IO capabilities the effect of this option on proving time should
# be negligible.
#
# NOTE: It is likely a bad idea to disable sector pre-checks in setups with no PoSt workers.
#
# NOTE: Even when this option is enabled, recovering sectors will still be checked before the recovery declaration
# message is sent to the chain.
#
# After changing this option, confirm that the new value works in your setup by invoking
# 'lotus-miner proving compute window-post 0'
#
# type: bool
# env var: LOTUS_PROVING_DISABLEWDPOSTPRECHECKS
#DisableWDPoStPreChecks = false
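
For programmatic use, these options live in config.ProvingConfig, which the itest added below passes to modules.WindowPostScheduler. A minimal sketch along those lines - only DisableWDPoStPreChecks appears verbatim in the test; the other field names are assumed from the TOML keys above:

// Sketch, following the pattern in itests/wdpost_check_config_test.go below.
opt := node.Override(new(*wdpost.WindowPoStScheduler), modules.WindowPostScheduler(
	config.DefaultStorageMiner().Fees,
	config.ProvingConfig{
		ParallelCheckLimit:       128,  // 0 = unlimited
		DisableBuiltinWindowPoSt: true, // field name assumed from the TOML key
		DisableWDPoStPreChecks:   true,
	},
))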


[Sealing]
# Upper bound on how many sectors can be waiting for more deals to be packed into them before sealing begins at any given time.
188 changes: 188 additions & 0 deletions itests/wdpost_check_config_test.go
@@ -0,0 +1,188 @@
package itests

import (
"context"
"testing"
"time"

"github.com/stretchr/testify/require"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"

"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/impl"
"github.com/filecoin-project/lotus/node/modules"
"github.com/filecoin-project/lotus/storage/sealer/mock"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
"github.com/filecoin-project/lotus/storage/wdpost"
)

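// TestWindowPostNoPreChecks runs a miner with DisableWDPoStPreChecks enabled and
// verifies that window PoSt still proves healthy sectors, that dropped sectors
// become faulty, and that recovery and a post-recovery pledge work as expected.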
func TestWindowPostNoPreChecks(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

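// Build a client/miner ensemble with mock proofs, overriding the window PoSt
// scheduler so that provable-sector pre-checks are disabled.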
client, miner, ens := kit.EnsembleMinimal(t,
kit.LatestActorsAt(-1),
kit.MockProofs(),
kit.ConstructorOpts(
node.Override(new(*wdpost.WindowPoStScheduler), modules.WindowPostScheduler(
config.DefaultStorageMiner().Fees,
config.ProvingConfig{
DisableWDPoStPreChecks: true,
},
))))
ens.InterconnectAll().BeginMining(2 * time.Millisecond)

nSectors := 10

miner.PledgeSectors(ctx, nSectors, 0, nil)

maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)

mid, err := address.IDFromAddress(maddr)
require.NoError(t, err)

t.Log("Running one proving period")
waitUntil := di.Open + di.WPoStProvingPeriod
t.Logf("End for head.Height > %d", waitUntil)

ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
t.Logf("Now head.Height = %d", ts.Height())

p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)

ssz, err := miner.ActorSectorSize(ctx, maddr)
require.NoError(t, err)

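// Initially, every preseal and newly pledged sector should count toward the miner's power.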
require.Equal(t, p.MinerPower, p.TotalPower)
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors+kit.DefaultPresealsPerBootstrapMiner)))

t.Log("Drop some sectors")

// Drop 2 sectors from deadline 2 partition 0 (full partition / deadline)
{
parts, err := client.StateMinerPartitions(ctx, maddr, 2, types.EmptyTSK)
require.NoError(t, err)
require.Greater(t, len(parts), 0)

secs := parts[0].AllSectors
n, err := secs.Count()
require.NoError(t, err)
require.Equal(t, uint64(2), n)

// Drop the partition
err = secs.ForEach(func(sid uint64) error {
return miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkCorrupted(storiface.SectorRef{
ID: abi.SectorID{
Miner: abi.ActorID(mid),
Number: abi.SectorNumber(sid),
},
}, true)
})
require.NoError(t, err)
}

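// Remember one of the sectors we're about to drop so it can be recovered later.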
var s storiface.SectorRef

// Drop 1 sector from deadline 3 partition 0
{
parts, err := client.StateMinerPartitions(ctx, maddr, 3, types.EmptyTSK)
require.NoError(t, err)
require.Greater(t, len(parts), 0)

secs := parts[0].AllSectors
n, err := secs.Count()
require.NoError(t, err)
require.Equal(t, uint64(2), n)

// Drop the sector
sn, err := secs.First()
require.NoError(t, err)

all, err := secs.All(2)
require.NoError(t, err)
t.Log("the sectors", all)

s = storiface.SectorRef{
ID: abi.SectorID{
Miner: abi.ActorID(mid),
Number: abi.SectorNumber(sn),
},
}

err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, true)
require.NoError(t, err)
}

di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)

t.Log("Go through another PP, wait for sectors to become faulty")
waitUntil = di.Open + di.WPoStProvingPeriod
t.Logf("End for head.Height > %d", waitUntil)

ts = client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
t.Logf("Now head.Height = %d", ts.Height())

p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)

require.Equal(t, p.MinerPower, p.TotalPower)

sectors := p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
require.Equal(t, nSectors+kit.DefaultPresealsPerBootstrapMiner-3, int(sectors)) // -3 for the sectors we just dropped

t.Log("Recover one sector")

err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, false)
require.NoError(t, err)

di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)

waitUntil = di.Open + di.WPoStProvingPeriod
t.Logf("End for head.Height > %d", waitUntil)

ts = client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
t.Logf("Now head.Height = %d", ts.Height())

p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)

require.Equal(t, p.MinerPower, p.TotalPower)

sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
require.Equal(t, nSectors+kit.DefaultPresealsPerBootstrapMiner-2, int(sectors)) // -2 for the sectors still not recovered

// pledge a sector after recovery

miner.PledgeSectors(ctx, 1, nSectors, nil)

{
// Wait until proven.
di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)

waitUntil := di.Open + di.WPoStProvingPeriod
t.Logf("End for head.Height > %d\n", waitUntil)

ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
t.Logf("Now head.Height = %d", ts.Height())
}

p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)

require.Equal(t, p.MinerPower, p.TotalPower)

sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
require.Equal(t, nSectors+kit.DefaultPresealsPerBootstrapMiner-2+1, int(sectors)) // -2 unrecovered sectors, +1 just-pledged sector
}
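
Like the other window PoSt itests, this one can be run on its own with 'go test -run TestWindowPostNoPreChecks ./itests/'; the test-itest-wdpost_check_config CircleCI job added above targets the same file.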