From d0e0bc99372f5845b0739838af57747a42728af9 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 14 Jan 2021 17:41:58 -0800 Subject: [PATCH] Implement off-chain Window PoSt verification (#1327) This patch implements two changes: 1. New Window PoSts, except those that restore faulty sectors, are optimistically accepted, assumed correct, and recorded in the state-tree for one proving period. 2. Optimistically accepted window posts can be disputed until `WPoStProofDisputeWindow` epochs after the challenge window in which they were submitted closes. When a dispute successfully refutes an optimistically accepted Window PoSt, the miner is fined one IPF per active sector in the partition (at the moment when the proof was submitted) plus a flat fee of 20FIL, all incorrectly proved sectors are marked faulty, and the disputer (address that submitted the dispute) is rewarded a fixed `DisputeReward`. --- actors/builtin/market/market_actor.go | 7 +- actors/builtin/market/market_test.go | 31 +- actors/builtin/market/policy.go | 3 +- actors/builtin/methods.go | 5 +- actors/builtin/miner/cbor_gen.go | 239 +++++++- actors/builtin/miner/deadline_state.go | 292 ++++++++-- actors/builtin/miner/deadline_state_test.go | 31 +- actors/builtin/miner/deadlines.go | 30 +- actors/builtin/miner/deadlines_helper_test.go | 47 ++ actors/builtin/miner/miner_actor.go | 248 ++++++-- actors/builtin/miner/miner_state.go | 20 +- actors/builtin/miner/miner_test.go | 548 ++++++++++++++++-- actors/builtin/miner/monies.go | 19 +- actors/builtin/miner/partition_state.go | 2 +- actors/builtin/miner/partition_state_test.go | 40 +- actors/builtin/miner/policy.go | 41 +- actors/builtin/miner/testing.go | 46 +- actors/builtin/shared.go | 10 +- actors/migration/nv9/miner.go | 23 +- gen/gen.go | 2 + 20 files changed, 1484 insertions(+), 200 deletions(-) create mode 100644 actors/builtin/miner/deadlines_helper_test.go diff --git a/actors/builtin/market/market_actor.go b/actors/builtin/market/market_actor.go 
index a23c7b4d3..76bafe476 100644 --- a/actors/builtin/market/market_actor.go +++ b/actors/builtin/market/market_actor.go @@ -195,8 +195,9 @@ func (a Actor) PublishStorageDeals(rt Runtime, params *PublishStorageDealsParams code := rt.Send(builtin.ExpertFundActorAddr, builtin.MethodsExpertFunds.BatchCheckData, &builtin.BatchPieceCIDParams{PieceCIDs: pids}, abi.NewTokenAmount(0), &builtin.Discard{}) builtin.RequireSuccess(rt, code, "failed to batch check expert data") - err := builtin.EnsureMinerNoPieces(rt, provider, pids) - builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "failed to check miner pieces") + stored, err := builtin.CheckMinerStoredAnyPiece(rt, provider, pids) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "failed to check miner stored pieces") + builtin.RequireState(rt, stored == false, "one or more files already stored") /* resolvedAddrs := make(map[addr.Address]addr.Address, len(params.Deals)) baselinePower := requestCurrentBaselinePower(rt) @@ -728,7 +729,7 @@ func genRandNextEpoch(currEpoch abi.ChainEpoch, deal *DealProposal, rbF func(cry } func genTerminateEpoch(currEpoch abi.ChainEpoch, dealID abi.DealID) (abi.ChainEpoch, error) { - offset := uint64(dealID) % uint64(DealUpdatesInterval) + offset := uint64(dealID) % uint64(DealTerminateLatency) return currEpoch + DealTerminateLatency + abi.ChainEpoch(offset), nil } diff --git a/actors/builtin/market/market_test.go b/actors/builtin/market/market_test.go index f7bfe1588..8553935c5 100644 --- a/actors/builtin/market/market_test.go +++ b/actors/builtin/market/market_test.go @@ -28,6 +28,11 @@ import ( "github.com/stretchr/testify/require" ) +var ( + cbgFalse = cbg.CborBool(false) + cbgTrue = cbg.CborBool(true) +) + func mustCbor(o cbor.Marshaler) []byte { buf := new(bytes.Buffer) if err := o.MarshalCBOR(buf); err != nil { @@ -490,7 +495,7 @@ func TestPublishStorageDeals(t *testing.T) { expectGetControlAddresses(rt, providerResolved, mAddr.owner, mAddr.worker, mAddr.coinbase) 
batchPids := builtin.BatchPieceCIDParams{PieceCIDs: []builtin.CheckedCID{{CID: deal.PieceCID}}} rt.ExpectSend(builtin.ExpertFundActorAddr, builtin.MethodsExpertFunds.BatchCheckData, &batchPids,big.Zero(), nil, exitcode.Ok) - rt.ExpectSend(providerResolved,builtin.MethodsMiner.EnsureNoPiece,&batchPids,big.Zero(),nil,exitcode.Ok) + rt.ExpectSend(providerResolved, builtin.MethodsMiner.StoredAny, &batchPids, big.Zero(), &cbgFalse, exitcode.Ok) // expectQueryNetworkInfo(rt, actor) // create a client proposal with a valid signature params := market.PublishStorageDealsParams{ @@ -868,7 +873,7 @@ func TestPublishStorageDealsFailures(t *testing.T) { batchPids := builtin.BatchPieceCIDParams{PieceCIDs: []builtin.CheckedCID{{CID: dealProposal.PieceCID}}} rt.ExpectSend(builtin.ExpertFundActorAddr, builtin.MethodsExpertFunds.BatchCheckData, &batchPids,big.Zero(), nil, exitcode.Ok) - rt.ExpectSend( provider, builtin.MethodsMiner.EnsureNoPiece, &batchPids,big.Zero(), nil, exitcode.Ok, ) + rt.ExpectSend( provider, builtin.MethodsMiner.StoredAny, &batchPids,big.Zero(), &cbgFalse, exitcode.Ok, ) /* expectQueryNetworkInfo(rt, actor) */ rt.SetCaller(worker, builtin.AccountActorCodeID) rt.ExpectVerifySignature(crypto.Signature{}, dealProposal.Client, mustCbor(&dealProposal), tc.signatureVerificationError) @@ -949,7 +954,7 @@ func TestPublishStorageDealsFailures(t *testing.T) { {CID: deal2.PieceCID}, }} rt.ExpectSend(builtin.ExpertFundActorAddr, builtin.MethodsExpertFunds.BatchCheckData, &batchPids,big.Zero(), nil, exitcode.Ok) - rt.ExpectSend(deal1.Provider, builtin.MethodsMiner.EnsureNoPiece, &batchPids, big.Zero(), nil, exitcode.Ok) + rt.ExpectSend(deal1.Provider, builtin.MethodsMiner.StoredAny, &batchPids, big.Zero(), &cbgFalse, exitcode.Ok) rt.SetCaller(worker, builtin.AccountActorCodeID) rt.ExpectVerifySignature(crypto.Signature{}, deal1.Client, mustCbor(&deal1), nil) @@ -1379,7 +1384,7 @@ func TestOnMinerSectorsTerminate(t *testing.T) { actor.terminateDeals(rt, provider, dealId1) 
ss = actor.checkState(rt) require.True(t, len(ss.DealOpEpochStats)==2 ) - _, ok := ss.DealOpEpochStats[currentEpoch+abi.ChainEpoch(market.DealTerminateLatency+dealId1)] + _, ok := ss.DealOpEpochStats[currentEpoch + market.DealTerminateLatency + abi.ChainEpoch(dealId1)] require.True(t, ok) // set a new epoch and terminate again -> however slash epoch will still be the old epoch. @@ -1387,10 +1392,10 @@ func TestOnMinerSectorsTerminate(t *testing.T) { rt.SetEpoch(newEpoch) actor.terminateDeals(rt, provider, dealId1, dealId2, dealId3) ss = actor.checkState(rt) - require.True(t, len(ss.DealOpEpochStats)==4 ) - _, ok = ss.DealOpEpochStats[newEpoch+abi.ChainEpoch(market.DealTerminateLatency+dealId2)] + require.True(t, len(ss.DealOpEpochStats) == 4) + _, ok = ss.DealOpEpochStats[newEpoch + market.DealTerminateLatency + abi.ChainEpoch(dealId2)] require.True(t, ok) - _, ok = ss.DealOpEpochStats[newEpoch+abi.ChainEpoch(market.DealTerminateLatency+dealId3)] + _, ok = ss.DealOpEpochStats[newEpoch + market.DealTerminateLatency + abi.ChainEpoch(dealId3)] require.True(t, ok) st := actor.getDealState(rt, dealId1) @@ -1599,7 +1604,7 @@ func TestCronTick(t *testing.T) { // slash deal1 slashEpoch := abi.ChainEpoch(150) - terminateEpoch := slashEpoch + abi.ChainEpoch(market.DealTerminateLatency + dealId1) + terminateEpoch := slashEpoch + market.DealTerminateLatency + abi.ChainEpoch(dealId1) rt.SetEpoch(slashEpoch) actor.terminateDeals(rt, provider, dealId1) @@ -1637,7 +1642,7 @@ func TestCronTick(t *testing.T) { rt.ExpectSend(provider, builtin.MethodsMiner.ControlAddresses, nil, abi.NewTokenAmount(0), &builtin.GetControlAddressesReturn{Worker: worker, Owner: owner, Coinbase: coinbase}, 0) batchPids := builtin.BatchPieceCIDParams{PieceCIDs: []builtin.CheckedCID{{CID: d2.PieceCID}}} rt.ExpectSend(builtin.ExpertFundActorAddr, builtin.MethodsExpertFunds.BatchCheckData, &batchPids, big.Zero(),nil,exitcode.Ok) - rt.ExpectSend(d2.Provider, builtin.MethodsMiner.EnsureNoPiece, &batchPids, 
big.Zero(),nil,exitcode.Ok) + rt.ExpectSend(d2.Provider, builtin.MethodsMiner.StoredAny, &batchPids, big.Zero(), &cbgFalse, exitcode.Ok) /* expectQueryNetworkInfo(rt, actor) */ rt.SetCaller(worker, builtin.AccountActorCodeID) rt.ExpectVerifySignature(crypto.Signature{}, d2.Client, mustCbor(&d2), nil) @@ -1942,7 +1947,7 @@ func TestCronTickTimedoutDeals(t *testing.T) { rt.ExpectSend(provider, builtin.MethodsMiner.ControlAddresses, nil, abi.NewTokenAmount(0), &builtin.GetControlAddressesReturn{Worker: worker, Owner: owner,Coinbase: coinbase}, 0) batchPids := builtin.BatchPieceCIDParams{PieceCIDs: []builtin.CheckedCID{{CID: d2.PieceCID}}} rt.ExpectSend(builtin.ExpertFundActorAddr, builtin.MethodsExpertFunds.BatchCheckData, &batchPids, big.Zero(),nil,exitcode.Ok) - rt.ExpectSend(d2.Provider, builtin.MethodsMiner.EnsureNoPiece, &batchPids, big.Zero(),nil,exitcode.Ok) + rt.ExpectSend(d2.Provider, builtin.MethodsMiner.StoredAny, &batchPids, big.Zero(), &cbgFalse, exitcode.Ok) /* expectQueryNetworkInfo(rt, actor) */ rt.SetCaller(worker, builtin.AccountActorCodeID) @@ -2361,7 +2366,7 @@ func TestMarketActorDeals(t *testing.T) { rt.ExpectSend(provider, builtin.MethodsMiner.ControlAddresses, nil, abi.NewTokenAmount(0), &builtin.GetControlAddressesReturn{Worker: worker, Owner: owner, Coinbase: coinbase}, 0) batchPids := builtin.BatchPieceCIDParams{PieceCIDs: []builtin.CheckedCID{{CID: dealProposal.PieceCID}}} rt.ExpectSend(builtin.ExpertFundActorAddr, builtin.MethodsExpertFunds.BatchCheckData, &batchPids, big.Zero(),nil,exitcode.Ok) - rt.ExpectSend(provider, builtin.MethodsMiner.EnsureNoPiece, &batchPids, big.Zero(),nil,exitcode.Ok) + rt.ExpectSend(provider, builtin.MethodsMiner.StoredAny, &batchPids, big.Zero(), &cbgFalse, exitcode.Ok) /* expectQueryNetworkInfo(rt, actor) */ rt.ExpectVerifySignature(crypto.Signature{}, client, mustCbor(¶ms.Deals[0].Proposal), nil) @@ -2419,7 +2424,7 @@ func TestMaxDealLabelSize(t *testing.T) { rt.ExpectSend(provider, 
builtin.MethodsMiner.ControlAddresses, nil, abi.NewTokenAmount(0), &builtin.GetControlAddressesReturn{Worker: worker, Owner: owner,Coinbase: coinbase}, 0) batchPids := builtin.BatchPieceCIDParams{PieceCIDs: []builtin.CheckedCID{{CID: dealProposal.PieceCID}}} rt.ExpectSend(builtin.ExpertFundActorAddr, builtin.MethodsExpertFunds.BatchCheckData, &batchPids, big.Zero(),nil,exitcode.Ok) - rt.ExpectSend(provider, builtin.MethodsMiner.EnsureNoPiece, &batchPids, big.Zero(),nil,exitcode.Ok) + rt.ExpectSend(provider, builtin.MethodsMiner.StoredAny, &batchPids, big.Zero(), &cbgFalse, exitcode.Ok) /* expectQueryNetworkInfo(rt, actor) */ rt.ExpectVerifySignature(crypto.Signature{}, client, mustCbor(¶ms.Deals[0].Proposal), nil) @@ -3085,7 +3090,7 @@ func (h *marketActorTestHarness) publishDeals(rt *mock.Runtime, minerAddrs *mine batchPids.PieceCIDs = append(batchPids.PieceCIDs, builtin.CheckedCID{CID: dr.deal.PieceCID}) } rt.ExpectSend(builtin.ExpertFundActorAddr, builtin.MethodsExpertFunds.BatchCheckData, &batchPids, big.Zero(), nil, exitcode.Ok) - rt.ExpectSend(minerAddrs.provider, builtin.MethodsMiner.EnsureNoPiece, &batchPids, big.Zero(), nil, exitcode.Ok) + rt.ExpectSend(minerAddrs.provider, builtin.MethodsMiner.StoredAny, &batchPids, big.Zero(), &cbgFalse, exitcode.Ok) /* expectQueryNetworkInfo(rt, h) */ params := market.PublishStorageDealsParams{ diff --git a/actors/builtin/market/policy.go b/actors/builtin/market/policy.go index 7b8cf92d2..51ec3632c 100644 --- a/actors/builtin/market/policy.go +++ b/actors/builtin/market/policy.go @@ -1,12 +1,13 @@ package market import ( + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/specs-actors/v2/actors/builtin" ) // The number of epochs between payment and other state processing for deals. 
const DealUpdatesInterval = builtin.EpochsInDay // PARAM_SPEC //TODO: -const DealTerminateLatency = 240 // 4 deadlines +const DealTerminateLatency = abi.ChainEpoch(60) // 1 deadline /* // The percentage of normalized cirulating // supply that must be covered by provider collateral in a deal diff --git a/actors/builtin/methods.go b/actors/builtin/methods.go index c46180ef3..4b4074ed4 100644 --- a/actors/builtin/methods.go +++ b/actors/builtin/methods.go @@ -103,8 +103,9 @@ var MethodsMiner = struct { AddPledge abi.MethodNum WithdrawPledge abi.MethodNum ChangeCoinbase abi.MethodNum - EnsureNoPiece abi.MethodNum -}{MethodConstructor, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26} + StoredAny abi.MethodNum + DisputeWindowedPoSt abi.MethodNum +}{MethodConstructor, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27} // var MethodsVerifiedRegistry = struct { // Constructor abi.MethodNum diff --git a/actors/builtin/miner/cbor_gen.go b/actors/builtin/miner/cbor_gen.go index b69b3eae6..9b687b7ca 100644 --- a/actors/builtin/miner/cbor_gen.go +++ b/actors/builtin/miner/cbor_gen.go @@ -792,7 +792,7 @@ func (t *Deadlines) UnmarshalCBOR(r io.Reader) error { return nil } -var lengthBufDeadline = []byte{135} +var lengthBufDeadline = []byte{138} func (t *Deadline) MarshalCBOR(w io.Writer) error { if t == nil { @@ -817,8 +817,8 @@ func (t *Deadline) MarshalCBOR(w io.Writer) error { return xerrors.Errorf("failed to write cid field t.ExpirationsEpochs: %w", err) } - // t.PostSubmissions (bitfield.BitField) (struct) - if err := t.PostSubmissions.MarshalCBOR(w); err != nil { + // t.PartitionsPoSted (bitfield.BitField) (struct) + if err := t.PartitionsPoSted.MarshalCBOR(w); err != nil { return err } @@ -843,6 +843,25 @@ func (t *Deadline) MarshalCBOR(w io.Writer) error { if err := t.FaultyPower.MarshalCBOR(w); err != nil { return err } + + // t.OptimisticPoStSubmissions (cid.Cid) (struct) + + if err 
:= cbg.WriteCidBuf(scratch, w, t.OptimisticPoStSubmissions); err != nil { + return xerrors.Errorf("failed to write cid field t.OptimisticPoStSubmissions: %w", err) + } + + // t.PartitionsSnapshot (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.PartitionsSnapshot); err != nil { + return xerrors.Errorf("failed to write cid field t.PartitionsSnapshot: %w", err) + } + + // t.OptimisticPoStSubmissionsSnapshot (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.OptimisticPoStSubmissionsSnapshot); err != nil { + return xerrors.Errorf("failed to write cid field t.OptimisticPoStSubmissionsSnapshot: %w", err) + } + return nil } @@ -860,7 +879,7 @@ func (t *Deadline) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input should be of type array") } - if extra != 7 { + if extra != 10 { return fmt.Errorf("cbor input had wrong number of fields") } @@ -888,12 +907,12 @@ func (t *Deadline) UnmarshalCBOR(r io.Reader) error { t.ExpirationsEpochs = c } - // t.PostSubmissions (bitfield.BitField) (struct) + // t.PartitionsPoSted (bitfield.BitField) (struct) { - if err := t.PostSubmissions.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.PostSubmissions: %w", err) + if err := t.PartitionsPoSted.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.PartitionsPoSted: %w", err) } } @@ -942,6 +961,42 @@ func (t *Deadline) UnmarshalCBOR(r io.Reader) error { return xerrors.Errorf("unmarshaling t.FaultyPower: %w", err) } + } + // t.OptimisticPoStSubmissions (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.OptimisticPoStSubmissions: %w", err) + } + + t.OptimisticPoStSubmissions = c + + } + // t.PartitionsSnapshot (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PartitionsSnapshot: %w", err) + } + + t.PartitionsSnapshot = c + + } + // t.OptimisticPoStSubmissionsSnapshot 
(cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.OptimisticPoStSubmissionsSnapshot: %w", err) + } + + t.OptimisticPoStSubmissionsSnapshot = c + } return nil } @@ -2198,6 +2253,99 @@ func (t *VestingFund) UnmarshalCBOR(r io.Reader) error { return nil } +var lengthBufWindowedPoSt = []byte{130} + +func (t *WindowedPoSt) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufWindowedPoSt); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Partitions (bitfield.BitField) (struct) + if err := t.Partitions.MarshalCBOR(w); err != nil { + return err + } + + // t.Proofs ([]proof.PoStProof) (slice) + if len(t.Proofs) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Proofs was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Proofs))); err != nil { + return err + } + for _, v := range t.Proofs { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + return nil +} + +func (t *WindowedPoSt) UnmarshalCBOR(r io.Reader) error { + *t = WindowedPoSt{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Partitions (bitfield.BitField) (struct) + + { + + if err := t.Partitions.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Partitions: %w", err) + } + + } + // t.Proofs ([]proof.PoStProof) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Proofs: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor 
array") + } + + if extra > 0 { + t.Proofs = make([]proof.PoStProof, extra) + } + + for i := 0; i < int(extra); i++ { + + var v proof.PoStProof + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.Proofs[i] = v + } + + return nil +} + var lengthBufSubmitWindowedPoStParams = []byte{133} func (t *SubmitWindowedPoStParams) MarshalCBOR(w io.Writer) error { @@ -3735,6 +3883,83 @@ func (t *WithdrawPledgeParams) UnmarshalCBOR(r io.Reader) error { return nil } +var lengthBufDisputeWindowedPoStParams = []byte{130} + +func (t *DisputeWindowedPoStParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufDisputeWindowedPoStParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Deadline (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Deadline)); err != nil { + return err + } + + // t.PoStIndex (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.PoStIndex)); err != nil { + return err + } + + return nil +} + +func (t *DisputeWindowedPoStParams) UnmarshalCBOR(r io.Reader) error { + *t = DisputeWindowedPoStParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Deadline (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Deadline = uint64(extra) + + } + // t.PoStIndex (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return 
fmt.Errorf("wrong type for uint64 field") + } + t.PoStIndex = uint64(extra) + + } + return nil +} + var lengthBufFaultDeclaration = []byte{131} func (t *FaultDeclaration) MarshalCBOR(w io.Writer) error { diff --git a/actors/builtin/miner/deadline_state.go b/actors/builtin/miner/deadline_state.go index a18e7b3a4..646f393d9 100644 --- a/actors/builtin/miner/deadline_state.go +++ b/actors/builtin/miner/deadline_state.go @@ -12,6 +12,7 @@ import ( cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" + "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" "github.com/filecoin-project/specs-actors/v2/actors/util/adt" ) @@ -40,8 +41,13 @@ type Deadline struct { // recovered, and this queue will not be updated at that time. ExpirationsEpochs cid.Cid // AMT[ChainEpoch]BitField - // Partitions numbers with PoSt submissions since the proving period started. - PostSubmissions bitfield.BitField + // Partitions that have been proved by window PoSts so far during the + // current challenge window. + // NOTE: This bitfield includes both partitions whose proofs + // were optimistically accepted and stored in + // OptimisticPoStSubmissions, and those whose proofs were + // verified on-chain. + PartitionsPoSted bitfield.BitField // Partitions with sectors that terminated early. EarlyTerminations bitfield.BitField @@ -54,12 +60,42 @@ type Deadline struct { // Memoized sum of faulty power in partitions. FaultyPower PowerPair + + // AMT of optimistically accepted WindowPoSt proofs, submitted during + // the current challenge window. At the end of the challenge window, + // this AMT will be moved to PoStSubmissionsSnapshot. WindowPoSt proofs + // verified on-chain do not appear in this AMT. + OptimisticPoStSubmissions cid.Cid // AMT[]WindowedPoSt + + // Snapshot of partition state at the end of the previous challenge + // window for this deadline. 
+ PartitionsSnapshot cid.Cid + // Snapshot of the proofs submitted by the end of the previous challenge + // window for this deadline. + // + // These proofs may be disputed via DisputeWindowedPoSt. Successfully + // disputed window PoSts are removed from the snapshot. + OptimisticPoStSubmissionsSnapshot cid.Cid +} + +type WindowedPoSt struct { + // Partitions proved by this WindowedPoSt. + Partitions bitfield.BitField + // Array of proofs, one per distinct registered proof type present in + // the sectors being proven. In the usual case of a single proof type, + // this array will always have a single element (independent of number + // of partitions). + Proofs []proof.PoStProof } // Bitwidth of AMTs determined empirically from mutation patterns and projections of mainnet data. const DeadlinePartitionsAmtBitwidth = 3 // Usually a small array const DeadlineExpirationAmtBitwidth = 5 +// Given that 4 partitions can be proven in one post, this AMT's height will +// only exceed the partition AMT's height at ~0.75EiB of storage. 
+const DeadlineOptimisticPoStSubmissionsAmtBitwidth = 2 + // // Deadlines (plural) // @@ -130,14 +166,22 @@ func ConstructDeadline(store adt.Store) (*Deadline, error) { return nil, xerrors.Errorf("failed to construct empty deadline expiration array: %w", err) } + emptyPoStSubmissionsArrayCid, err := adt.StoreEmptyArray(store, DeadlineOptimisticPoStSubmissionsAmtBitwidth) + if err != nil { + return nil, xerrors.Errorf("failed to construct empty proofs array: %w", err) + } + return &Deadline{ - Partitions: emptyPartitionsArrayCid, - ExpirationsEpochs: emptyDeadlineExpirationArrayCid, - PostSubmissions: bitfield.New(), - EarlyTerminations: bitfield.New(), - LiveSectors: 0, - TotalSectors: 0, - FaultyPower: NewPowerPairZero(), + Partitions: emptyPartitionsArrayCid, + ExpirationsEpochs: emptyDeadlineExpirationArrayCid, + EarlyTerminations: bitfield.New(), + LiveSectors: 0, + TotalSectors: 0, + FaultyPower: NewPowerPairZero(), + PartitionsPoSted: bitfield.New(), + OptimisticPoStSubmissions: emptyPoStSubmissionsArrayCid, + PartitionsSnapshot: emptyPartitionsArrayCid, + OptimisticPoStSubmissionsSnapshot: emptyPoStSubmissionsArrayCid, }, nil } @@ -149,6 +193,30 @@ func (d *Deadline) PartitionsArray(store adt.Store) (*adt.Array, error) { return arr, nil } +func (d *Deadline) OptimisticProofsArray(store adt.Store) (*adt.Array, error) { + arr, err := adt.AsArray(store, d.OptimisticPoStSubmissions, DeadlineOptimisticPoStSubmissionsAmtBitwidth) + if err != nil { + return nil, xerrors.Errorf("failed to load proofs: %w", err) + } + return arr, nil +} + +func (d *Deadline) PartitionsSnapshotArray(store adt.Store) (*adt.Array, error) { + arr, err := adt.AsArray(store, d.PartitionsSnapshot, DeadlinePartitionsAmtBitwidth) + if err != nil { + return nil, xerrors.Errorf("failed to load partitions snapshot: %w", err) + } + return arr, nil +} + +func (d *Deadline) OptimisticProofsSnapshotArray(store adt.Store) (*adt.Array, error) { + arr, err := adt.AsArray(store, 
d.OptimisticPoStSubmissionsSnapshot, DeadlineOptimisticPoStSubmissionsAmtBitwidth) + if err != nil { + return nil, xerrors.Errorf("failed to load proofs snapshot: %w", err) + } + return arr, nil +} + func (d *Deadline) LoadPartition(store adt.Store, partIdx uint64) (*Partition, error) { partitions, err := d.PartitionsArray(store) if err != nil { @@ -165,6 +233,22 @@ func (d *Deadline) LoadPartition(store adt.Store, partIdx uint64) (*Partition, e return &partition, nil } +func (d *Deadline) LoadPartitionSnapshot(store adt.Store, partIdx uint64) (*Partition, error) { + partitions, err := d.PartitionsSnapshotArray(store) + if err != nil { + return nil, err + } + var partition Partition + found, err := partitions.Get(partIdx, &partition) + if err != nil { + return nil, xerrors.Errorf("failed to lookup partition %d: %w", partIdx, err) + } + if !found { + return nil, xc.ErrNotFound.Wrapf("no partition %d", partIdx) + } + return &partition, nil +} + // Adds some partition numbers to the set expiring at an epoch. func (d *Deadline) AddExpirationPartitions(store adt.Store, expirationEpoch abi.ChainEpoch, partitions []uint64, quant QuantSpec) error { // Avoid doing any work if there's nothing to reschedule. 
@@ -691,7 +775,7 @@ func (dl *Deadline) RemovePartitions(store adt.Store, toRemove bitfield.BitField return live, dead, removedPower, nil } -func (dl *Deadline) DeclareFaults( +func (dl *Deadline) RecordFaults( store adt.Store, sectors Sectors, ssize abi.SectorSize, quant QuantSpec, faultExpirationEpoch abi.ChainEpoch, partitionSectors PartitionSectorMap, ) (powerDelta PowerPair, err error) { @@ -712,7 +796,7 @@ func (dl *Deadline) DeclareFaults( return xc.ErrNotFound.Wrapf("no such partition %d", partIdx) } - newFaults, partitionPowerDelta, partitionNewFaultyPower, err := partition.DeclareFaults( + newFaults, partitionPowerDelta, partitionNewFaultyPower, err := partition.RecordFaults( store, sectors, sectorNos, faultExpirationEpoch, ssize, quant, ) if err != nil { @@ -805,7 +889,7 @@ func (dl *Deadline) ProcessDeadlineEnd(store adt.Store, quant QuantSpec, faultEx detectedAny := false var rescheduledPartitions []uint64 for partIdx := uint64(0); partIdx < partitions.Length(); partIdx++ { - proven, err := dl.PostSubmissions.IsSet(partIdx) + proven, err := dl.PartitionsPoSted.IsSet(partIdx) if err != nil { return powerDelta, penalizedPower, xerrors.Errorf("failed to check submission for partition %d: %w", partIdx, err) } @@ -868,8 +952,14 @@ func (dl *Deadline) ProcessDeadlineEnd(store adt.Store, quant QuantSpec, faultEx return powerDelta, penalizedPower, xc.ErrIllegalState.Wrapf("failed to update deadline expiration queue: %w", err) } - // Reset PoSt submissions. - dl.PostSubmissions = bitfield.New() + // Reset PoSt submissions, snapshot proofs. 
+ dl.PartitionsPoSted = bitfield.New() + dl.PartitionsSnapshot = dl.Partitions + dl.OptimisticPoStSubmissionsSnapshot = dl.OptimisticPoStSubmissions + dl.OptimisticPoStSubmissions, err = adt.StoreEmptyArray(store, DeadlineOptimisticPoStSubmissionsAmtBitwidth) + if err != nil { + return powerDelta, penalizedPower, xerrors.Errorf("failed to clear pending proofs array: %w", err) + } return powerDelta, penalizedPower, nil } @@ -882,11 +972,8 @@ type PoStResult struct { Sectors bitfield.BitField // IgnoredSectors is a subset of Sectors that should be ignored. IgnoredSectors bitfield.BitField -} - -// PenaltyPower is the power from this PoSt that should be penalized. -func (p *PoStResult) PenaltyPower() PowerPair { - return p.NewFaultyPower.Add(p.RetractedRecoveryPower) + // Bitfield of partitions that were proven. + Partitions bitfield.BitField } // RecordProvenSectors processes a series of posts, recording proven partitions @@ -896,14 +983,33 @@ func (p *PoStResult) PenaltyPower() PowerPair { // changes to power (newly faulty power, power that should have been proven // recovered but wasn't, and newly recovered power). // -// NOTE: This function does not actually _verify_ any proofs. The returned -// Sectors and IgnoredSectors must subsequently be validated against the PoSt -// submitted by the miner. +// NOTE: This function does not actually _verify_ any proofs. 
func (dl *Deadline) RecordProvenSectors( store adt.Store, sectors Sectors, ssize abi.SectorSize, quant QuantSpec, faultExpiration abi.ChainEpoch, postPartitions []PoStPartition, ) (*PoStResult, error) { + + partitionIndexes := bitfield.New() + for _, partition := range postPartitions { + partitionIndexes.Set(partition.Index) + } + if numPartitions, err := partitionIndexes.Count(); err != nil { + return nil, xerrors.Errorf("failed to count posted partitions: %w", err) + } else if numPartitions != uint64(len(postPartitions)) { + return nil, xc.ErrIllegalArgument.Wrapf("duplicate partitions proven") + } + + // First check to see if we're proving any already proven partitions. + // This is faster than checking one by one. + if alreadyProven, err := bitfield.IntersectBitField(dl.PartitionsPoSted, partitionIndexes); err != nil { + return nil, xerrors.Errorf("failed to check proven partitions: %w", err) + } else if empty, err := alreadyProven.IsEmpty(); err != nil { + return nil, xerrors.Errorf("failed to check proven intersection is empty: %w", err) + } else if !empty { + return nil, xc.ErrIllegalArgument.Wrapf("partition already proven: %v", alreadyProven) + } + partitions, err := dl.PartitionsArray(store) if err != nil { return nil, err @@ -919,15 +1025,6 @@ func (dl *Deadline) RecordProvenSectors( // Accumulate sectors info for proof verification. for _, post := range postPartitions { - alreadyProven, err := dl.PostSubmissions.IsSet(post.Index) - if err != nil { - return nil, xc.ErrIllegalState.Wrapf("failed to check if partition %d already posted: %w", post.Index, err) - } - if alreadyProven { - // Skip partitions already proven for this deadline. - continue - } - var partition Partition found, err := partitions.Get(post.Index, &partition) if err != nil { @@ -971,7 +1068,7 @@ func (dl *Deadline) RecordProvenSectors( powerDelta = powerDelta.Add(newPowerDelta).Add(recoveredPower) // Record the post. 
- dl.PostSubmissions.Set(post.Index) + dl.PartitionsPoSted.Set(post.Index) // At this point, the partition faults represents the expected faults for the proof, with new skipped // faults and recoveries taken into account. @@ -1010,10 +1107,66 @@ func (dl *Deadline) RecordProvenSectors( NewFaultyPower: newFaultyPowerTotal, RecoveredPower: recoveredPowerTotal, RetractedRecoveryPower: retractedRecoveryPowerTotal, + Partitions: partitionIndexes, }, nil } -/* // RescheduleSectorExpirations reschedules the expirations of the given sectors +// RecordPoStProofs records a set of optimistically accepted PoSt proofs +// (usually one), associating them with the given partitions. +func (dl *Deadline) RecordPoStProofs(store adt.Store, partitions bitfield.BitField, proofs []proof.PoStProof) error { + proofArr, err := dl.OptimisticProofsArray(store) + if err != nil { + return xerrors.Errorf("failed to load proofs: %w", err) + } + err = proofArr.AppendContinuous(&WindowedPoSt{ + Partitions: partitions, + Proofs: proofs, + }) + if err != nil { + return xerrors.Errorf("failed to store proof: %w", err) + } + + root, err := proofArr.Root() + if err != nil { + return xerrors.Errorf("failed to save proofs: %w", err) + } + dl.OptimisticPoStSubmissions = root + return nil +} + +// TakePoStProofs removes and returns a PoSt proof by index, along with the +// associated partitions. This method takes the PoSt from the PoSt submissions +// snapshot. 
+func (dl *Deadline) TakePoStProofs(store adt.Store, idx uint64) (partitions bitfield.BitField, proofs []proof.PoStProof, err error) { + proofArr, err := dl.OptimisticProofsSnapshotArray(store) + if err != nil { + return bitfield.New(), nil, xerrors.Errorf("failed to load proofs: %w", err) + } + var post WindowedPoSt + found, err := proofArr.Get(idx, &post) + if err != nil { + return bitfield.New(), nil, xerrors.Errorf("failed to retrieve proof %d: %w", idx, err) + } else if !found { + return bitfield.New(), nil, xc.ErrIllegalArgument.Wrapf("proof %d not found", idx) + } + + // Delete the proof from the proofs array, leaving a hole. + // This will not affect concurrent attempts to refute other proofs. + err = proofArr.Delete(idx) + if err != nil { + return bitfield.New(), nil, xerrors.Errorf("failed to delete proof %d: %w", idx, err) + } + + root, err := proofArr.Root() + if err != nil { + return bitfield.New(), nil, xerrors.Errorf("failed to save proofs: %w", err) + } + dl.OptimisticPoStSubmissionsSnapshot = root + return post.Partitions, post.Proofs, nil +} + +/* +// RescheduleSectorExpirations reschedules the expirations of the given sectors // to the target epoch, skipping any sectors it can't find. // // The power of the rescheduled sectors is assumed to have not changed since @@ -1077,6 +1230,79 @@ func (dl *Deadline) RescheduleSectorExpirations( return allReplaced, nil } */ +// DisputeInfo includes all the information necessary to dispute a post to the +// given partitions. 
+type DisputeInfo struct { + AllSectorNos, IgnoredSectorNos bitfield.BitField + DisputedSectors PartitionSectorMap + DisputedPower PowerPair +} + +// LoadPartitionsForDispute +func (dl *Deadline) LoadPartitionsForDispute(store adt.Store, partitions bitfield.BitField) (*DisputeInfo, error) { + partitionsSnapshot, err := dl.PartitionsSnapshotArray(store) + if err != nil { + return nil, xerrors.Errorf("failed to load partitions: %w", err) + } + + var allSectors, allIgnored []bitfield.BitField + disputedSectors := make(PartitionSectorMap) + disputedPower := NewPowerPairZero() + err = partitions.ForEach(func(partIdx uint64) error { + var partitionSnapshot Partition + if found, err := partitionsSnapshot.Get(partIdx, &partitionSnapshot); err != nil { + return err + } else if !found { + return xerrors.Errorf("failed to find partition %d", partIdx) + } + + // Record sectors for proof verification + allSectors = append(allSectors, partitionSnapshot.Sectors) + allIgnored = append(allIgnored, partitionSnapshot.Faults) + allIgnored = append(allIgnored, partitionSnapshot.Terminated) + allIgnored = append(allIgnored, partitionSnapshot.Unproven) + + // Record active sectors for marking faults. + active, err := partitionSnapshot.ActiveSectors() + if err != nil { + return err + } + err = disputedSectors.Add(partIdx, active) + if err != nil { + return err + } + + // Record disputed power for penalties. + // + // NOTE: This also includes power that was + // activated at the end of the last challenge + // window, and power from sectors that have since + // expired. + disputedPower = disputedPower.Add(partitionSnapshot.ActivePower()) + return nil + }) + if err != nil { + return nil, xerrors.Errorf("when disputing post: %w", err) + } + + allSectorsNos, err := bitfield.MultiMerge(allSectors...) + if err != nil { + return nil, xerrors.Errorf("failed to merge sector bitfields: %w", err) + } + + allIgnoredNos, err := bitfield.MultiMerge(allIgnored...) 
+ if err != nil { + return nil, xerrors.Errorf("failed to merge fault bitfields: %w", err) + } + + return &DisputeInfo{ + AllSectorNos: allSectorsNos, + IgnoredSectorNos: allIgnoredNos, + DisputedSectors: disputedSectors, + DisputedPower: disputedPower, + }, nil +} + func (d *Deadline) ValidateState() error { if d.LiveSectors > d.TotalSectors { return xerrors.Errorf("Deadline left with more live sectors than total: %v", d) diff --git a/actors/builtin/miner/deadline_state_test.go b/actors/builtin/miner/deadline_state_test.go index eea6fb79b..2de4dad8c 100644 --- a/actors/builtin/miner/deadline_state_test.go +++ b/actors/builtin/miner/deadline_state_test.go @@ -212,7 +212,7 @@ func TestDeadlines(t *testing.T) { } // Mark faulty. - powerDelta, err := dl.DeclareFaults( + powerDelta, err := dl.RecordFaults( store, sectorsArr(t, store, sectors), sectorSize, quantSpec, 9, map[uint64]bitfield.BitField{ 0: bf(1), @@ -592,7 +592,6 @@ func TestDeadlines(t *testing.T) { ).assert(t, store, dl) postResult2, err := dl.RecordProvenSectors(store, sectorArr, sectorSize, quantSpec, 13, []miner.PoStPartition{ - {Index: 1, Skipped: bf()}, // ignore already posted partitions {Index: 2, Skipped: bf()}, }) require.NoError(t, err) @@ -787,6 +786,28 @@ func TestDeadlines(t *testing.T) { require.Contains(t, err.Error(), "no such partition") }) + t.Run("post partition twice", func(t *testing.T) { + store := ipld.NewADTStore(context.Background()) + + dl := emptyDeadline(t, store) + addSectors(t, store, dl, true) + + // add an inactive sector + power, err := dl.AddSectors(store, partitionSize, true, extraSectors, sectorSize, quantSpec) + require.NoError(t, err) + expectedPower := miner.PowerForSectors(sectorSize, extraSectors) + assert.True(t, expectedPower.Equals(power)) + + sectorArr := sectorsArr(t, store, allSectors) + + _, err = dl.RecordProvenSectors(store, sectorArr, sectorSize, quantSpec, 13, []miner.PoStPartition{ + {Index: 0, Skipped: bf()}, + {Index: 0, Skipped: bf()}, + }) + 
require.Error(t, err) + require.Contains(t, err.Error(), "duplicate partitions proven") + }) + t.Run("retract recoveries", func(t *testing.T) { store := ipld.NewADTStore(context.Background()) dl := emptyDeadline(t, store) @@ -803,7 +824,7 @@ func TestDeadlines(t *testing.T) { })) // Retract recovery for sector 1. - powerDelta, err := dl.DeclareFaults(store, sectorArr, sectorSize, quantSpec, 13, map[uint64]bitfield.BitField{ + powerDelta, err := dl.RecordFaults(store, sectorArr, sectorSize, quantSpec, 13, map[uint64]bitfield.BitField{ 0: bf(1), }) @@ -910,7 +931,7 @@ func TestDeadlines(t *testing.T) { sectorArr := sectorsArr(t, store, allSectors) // Declare sectors 1 & 6 faulty. - _, err := dl.DeclareFaults(store, sectorArr, sectorSize, quantSpec, 17, map[uint64]bitfield.BitField{ + _, err := dl.RecordFaults(store, sectorArr, sectorSize, quantSpec, 17, map[uint64]bitfield.BitField{ 0: bf(1), 4: bf(6), }) @@ -1013,7 +1034,7 @@ func (s expectedDeadlineState) assert(t *testing.T, store adt.Store, dl *miner.D assertBitfieldsEqual(t, s.recovering, recoveries) assertBitfieldsEqual(t, s.terminations, terminations) assertBitfieldsEqual(t, s.unproven, unproven) - assertBitfieldsEqual(t, s.posts, dl.PostSubmissions) + assertBitfieldsEqual(t, s.posts, dl.PartitionsPoSted) partitions, err := dl.PartitionsArray(store) require.NoError(t, err) diff --git a/actors/builtin/miner/deadlines.go b/actors/builtin/miner/deadlines.go index 4d4706619..67d4fb7b8 100644 --- a/actors/builtin/miner/deadlines.go +++ b/actors/builtin/miner/deadlines.go @@ -57,7 +57,8 @@ func FindSector(store adt.Store, deadlines *Deadlines, sectorNum abi.SectorNumbe return 0, 0, xerrors.Errorf("sector %d not due at any deadline", sectorNum) } -// Returns true if the deadline at the given index is currently mutable. +// Returns true if the deadline at the given index is currently mutable. A +// "mutable" deadline may have new sectors assigned to it. 
func deadlineIsMutable(provingPeriodStart abi.ChainEpoch, dlIdx uint64, currentEpoch abi.ChainEpoch) bool { // Get the next non-elapsed deadline (i.e., the next time we care about // mutations to the deadline). @@ -66,3 +67,30 @@ func deadlineIsMutable(provingPeriodStart abi.ChainEpoch, dlIdx uint64, currentE // that deadline opens. return currentEpoch < dlInfo.Open-WPoStChallengeWindow } + +// Returns true if optimistically accepted posts submitted to the given deadline +// may be disputed. Specifically, this ensures that: +// +// 1. Optimistic PoSts may not be disputed while the challenge window is open. +// 2. Optimistic PoSts may not be disputed after the miner could have compacted the deadline. +func deadlineAvailableForOptimisticPoStDispute(provingPeriodStart abi.ChainEpoch, dlIdx uint64, currentEpoch abi.ChainEpoch) bool { + if provingPeriodStart > currentEpoch { + // We haven't started proving yet, there's nothing to dispute. + return false + } + dlInfo := NewDeadlineInfo(provingPeriodStart, dlIdx, currentEpoch).NextNotElapsed() + + return !dlInfo.IsOpen() && currentEpoch < (dlInfo.Close-WPoStProvingPeriod)+WPoStDisputeWindow +} + +// Returns true if the given deadline may be compacted in the current epoch. +// Deadlines may not be compacted when: +// +// 1. The deadline is currently being challenged. +// 2. The deadline is to be challenged next. +// 3. Optimistically accepted posts from the deadline's last challenge window +// can currently be disputed.
+func deadlineAvailableForCompaction(provingPeriodStart abi.ChainEpoch, dlIdx uint64, currentEpoch abi.ChainEpoch) bool { + return deadlineIsMutable(provingPeriodStart, dlIdx, currentEpoch) && + !deadlineAvailableForOptimisticPoStDispute(provingPeriodStart, dlIdx, currentEpoch) +} diff --git a/actors/builtin/miner/deadlines_helper_test.go b/actors/builtin/miner/deadlines_helper_test.go new file mode 100644 index 000000000..f3858700d --- /dev/null +++ b/actors/builtin/miner/deadlines_helper_test.go @@ -0,0 +1,47 @@ +package miner + +import ( + "testing" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/stretchr/testify/assert" +) + +func TestCompactionWindow(t *testing.T) { + periodStart := abi.ChainEpoch(1024) + dlInfo := NewDeadlineInfo(periodStart, 0, 0) + assert.True(t, deadlineAvailableForCompaction(periodStart, 0, dlInfo.Open-WPoStChallengeWindow-1), + "compaction is possible up till the blackout period") + assert.False(t, deadlineAvailableForCompaction(periodStart, 0, dlInfo.Open-WPoStChallengeWindow), + "compaction is not possible during the prior window") + + assert.False(t, deadlineAvailableForCompaction(periodStart, 0, dlInfo.Open+10), + "compaction is not possible during the window") + + assert.False(t, deadlineAvailableForCompaction(periodStart, 0, dlInfo.Close), + "compaction is not possible immediately after the window") + + assert.False(t, deadlineAvailableForCompaction(periodStart, 0, dlInfo.Last()+WPoStDisputeWindow), + "compaction is not possible before the proof challenge period has passed") + + assert.True(t, deadlineAvailableForCompaction(periodStart, 0, dlInfo.Close+WPoStDisputeWindow), + "compaction is possible after the proof challenge period has passed") + + assert.True(t, deadlineAvailableForCompaction(periodStart, 0, dlInfo.Open+WPoStProvingPeriod-WPoStChallengeWindow-1), + "compaction remains possible until the next blackout") + assert.False(t, deadlineAvailableForCompaction(periodStart, 0, 
dlInfo.Open+WPoStProvingPeriod-WPoStChallengeWindow), + "compaction is not possible during the next blackout") +} + +func TestChallengeWindow(t *testing.T) { + periodStart := abi.ChainEpoch(1024) + dlInfo := NewDeadlineInfo(periodStart, 0, 0) + assert.False(t, deadlineAvailableForOptimisticPoStDispute(periodStart, 0, dlInfo.Open), + "proof challenge is not possible while the window is open") + assert.True(t, deadlineAvailableForOptimisticPoStDispute(periodStart, 0, dlInfo.Close), + "proof challenge is possible after the window is closes") + assert.True(t, deadlineAvailableForOptimisticPoStDispute(periodStart, 0, dlInfo.Close+WPoStDisputeWindow-1), + "proof challenge is possible until the proof challenge period has passed") + assert.False(t, deadlineAvailableForOptimisticPoStDispute(periodStart, 0, dlInfo.Close+WPoStDisputeWindow), + "proof challenge is not possible after the proof challenge period has passed") +} diff --git a/actors/builtin/miner/miner_actor.go b/actors/builtin/miner/miner_actor.go index 126aea082..977fcb0ec 100644 --- a/actors/builtin/miner/miner_actor.go +++ b/actors/builtin/miner/miner_actor.go @@ -70,8 +70,8 @@ func (a Actor) Exports() []interface{} { 23: a.AddPledge, 24: a.WithdrawPledge, 25: a.ChangeCoinbase, - 26: a.EnsureNoPiece, - /* 8: a.ExtendSectorExpiration, */ + 26: a.StoredAny, + 27: a.DisputeWindowedPoSt, } } @@ -384,11 +384,6 @@ func (a Actor) SubmitWindowedPoSt(rt Runtime, params *SubmitWindowedPoStParams) rt.Abortf(exitcode.ErrIllegalArgument, "expected at most %d bytes of randomness, got %d", abi.RandomnessLength, len(params.ChainCommitRand)) } - partitionIndexes := bitfield.New() - for _, partition := range params.Partitions { - partitionIndexes.Set(partition.Index) - } - var postResult *PoStResult var info *MinerInfo rt.StateTransaction(&st, func() { @@ -405,6 +400,8 @@ func (a Actor) SubmitWindowedPoSt(rt Runtime, params *SubmitWindowedPoStParams) rt.Abortf(exitcode.ErrIllegalArgument, "expected exactly one proof, got %d", 
len(params.Proofs)) } else if params.Proofs[0].PoStProof != info.WindowPoStProofType { rt.Abortf(exitcode.ErrIllegalArgument, "expected proof of type %s, got proof of type %s", info.WindowPoStProofType, params.Proofs[0]) + } else if len(params.Proofs[0].ProofBytes) > MaxPoStProofSize { + rt.Abortf(exitcode.ErrIllegalArgument, "expected proof to be smaller than %d bytes", MaxPoStProofSize) } // Validate that the miner didn't try to prove too many partitions at once. @@ -450,38 +447,27 @@ func (a Actor) SubmitWindowedPoSt(rt Runtime, params *SubmitWindowedPoStParams) deadline, err := deadlines.LoadDeadline(store, params.Deadline) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load deadline %d", params.Deadline) - alreadyProven, err := bitfield.IntersectBitField(deadline.PostSubmissions, partitionIndexes) - builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to check proven partitions") - empty, err := alreadyProven.IsEmpty() - builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to check proven intersection is empty") - if !empty { - rt.Abortf(exitcode.ErrIllegalArgument, "partition already proven: %v", alreadyProven) - } - // Record proven sectors/partitions, returning updates to power and the final set of sectors // proven/skipped. // - // NOTE: This function does not actually check the proofs but does assume that they'll be - // successfully validated. The actual proof verification is done below in verifyWindowedPost. + // NOTE: This function does not actually check the proofs but does assume that they're correct. Instead, + // it snapshots the deadline's state and the submitted proofs at the end of the challenge window and + // allows third-parties to dispute these proofs. // - // If proof verification fails, the this deadline MUST NOT be saved and this function should - // be aborted. + // While we could perform _all_ operations at the end of challenge window, we do as much as we can here to avoid + // overloading cron.
faultExpiration := currDeadline.Last() + FaultMaxAge postResult, err = deadline.RecordProvenSectors(store, sectors, info.SectorSize, QuantSpecForDeadline(currDeadline), faultExpiration, params.Partitions) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to process post submission for deadline %d", params.Deadline) - // Skipped sectors (including retracted recoveries) pay nothing at Window PoSt, - // but will incur the "ongoing" fault fee at deadline end. + // Make sure we actually proved something. - // Validate proofs + provenSectors, err := bitfield.SubtractBitField(postResult.Sectors, postResult.IgnoredSectors) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to determine proven sectors for deadline %d", params.Deadline) - // Load sector infos for proof, substituting a known-good sector for known-faulty sectors. - // Note: this is slightly sub-optimal, loading info for the recovering sectors again after they were already - // loaded above. - sectorInfos, err := sectors.LoadForProof(postResult.Sectors, postResult.IgnoredSectors) - builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load proven sector info") - - if len(sectorInfos) == 0 { + noSectors, err := provenSectors.IsEmpty() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to determine if any sectors were proven", params.Deadline) + if noSectors { // Abort verification if all sectors are (now) faults. There's nothing to prove. // It's not rational for a miner to submit a Window PoSt marking *all* non-faulty sectors as skipped, // since that will just cause them to pay a penalty at deadline end that would otherwise be zero @@ -489,11 +475,18 @@ func (a Actor) SubmitWindowedPoSt(rt Runtime, params *SubmitWindowedPoStParams) rt.Abortf(exitcode.ErrIllegalArgument, "cannot prove partitions with no active sectors") } - // Verify the proof. - // A failed verification doesn't immediately cause a penalty; the miner can try again. 
- // - // This function aborts on failure. - verifyWindowedPost(rt, currDeadline.Challenge, sectorInfos, params.Proofs) + // If we're not recovering power, record the proof for optimistic verification. + if postResult.RecoveredPower.IsZero() { + err = deadline.RecordPoStProofs(store, postResult.Partitions, params.Proofs) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to record proof for optimistic verification", params.Deadline) + } else { + // otherwise, check the proof + sectorInfos, err := sectors.LoadForProof(postResult.Sectors, postResult.IgnoredSectors) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load sectors for post verification") + + err = verifyWindowedPost(rt, currDeadline.Challenge, sectorInfos, params.Proofs) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "window post failed") + } err = deadlines.UpdateDeadline(store, params.Deadline, deadline) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to update deadline %d", params.Deadline) @@ -515,6 +508,159 @@ func (a Actor) SubmitWindowedPoSt(rt Runtime, params *SubmitWindowedPoStParams) return nil } +type DisputeWindowedPoStParams struct { + Deadline uint64 + PoStIndex uint64 // only one is allowed at a time to avoid loading too many sector infos. +} + +func (a Actor) DisputeWindowedPoSt(rt Runtime, params *DisputeWindowedPoStParams) *abi.EmptyValue { + rt.ValidateImmediateCallerType(builtin.CallerTypesSignable...) + reporter := rt.Caller() + + if params.Deadline >= WPoStPeriodDeadlines { + rt.Abortf(exitcode.ErrIllegalArgument, "invalid deadline %d of %d", params.Deadline, WPoStPeriodDeadlines) + } + + currEpoch := rt.CurrEpoch() + + // Note: these are going to be slightly inaccurate as time + // will have moved on from when the post was actually + // submitted. + // + // // However, these are estimates _anyways_. 
+ // epochReward := requestCurrentEpochBlockReward(rt) + // pwrTotal := requestCurrentTotalPower(rt) + + toBurn := abi.NewTokenAmount(0) + toReward := abi.NewTokenAmount(0) + // pledgeDelta := abi.NewTokenAmount(0) + powerDelta := NewPowerPairZero() + var st State + rt.StateTransaction(&st, func() { + if !deadlineAvailableForOptimisticPoStDispute(st.ProvingPeriodStart, params.Deadline, currEpoch) { + rt.Abortf(exitcode.ErrForbidden, "can only dispute window posts during the dispute window (%d epochs after the challenge window closes)", WPoStDisputeWindow) + } + + info := getMinerInfo(rt, &st) + penalisedPower := NewPowerPairZero() + store := adt.AsStore(rt) + + // Check proof + { + // Find the proving period start for the deadline in question. + ppStart := st.ProvingPeriodStart + if st.CurrentDeadline < params.Deadline { + ppStart -= WPoStProvingPeriod + } + targetDeadline := NewDeadlineInfo(ppStart, params.Deadline, currEpoch) + + // Load the target deadline. + deadlinesCurrent, err := st.LoadDeadlines(store) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load deadlines") + + dlCurrent, err := deadlinesCurrent.LoadDeadline(store, params.Deadline) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load deadline") + + // Take the post from the snapshot for dispute. + // This operation REMOVES the PoSt from the snapshot so + // it can't be disputed again. If this method fails, + // this operation must be rolled back. + partitions, proofs, err := dlCurrent.TakePoStProofs(store, params.PoStIndex) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load proof for dispute") + + // Load the partition info we need for the dispute. + disputeInfo, err := dlCurrent.LoadPartitionsForDispute(store, partitions) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load partition info for dispute") + // This includes power that is no longer active (e.g., due to sector terminations). 
+ // It must only be used for penalty calculations, not power adjustments. + penalisedPower = disputeInfo.DisputedPower + + // Load sectors for the dispute. + sectors, err := LoadSectors(store, st.Sectors) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load sectors array") + + sectorInfos, err := sectors.LoadForProof(disputeInfo.AllSectorNos, disputeInfo.IgnoredSectorNos) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load sectors to dispute window post") + + // Check proof, we fail if validation succeeds. + err = verifyWindowedPost(rt, targetDeadline.Challenge, sectorInfos, proofs) + if err == nil { + rt.Abortf(exitcode.ErrIllegalArgument, "failed to dispute valid post") + return + } + rt.Log(rtt.INFO, "successfully disputed: %s", err) + + // Ok, now we record faults. This always works because + // we don't allow compaction/moving sectors during the + // challenge window. + // + // However, some of these sectors may have been + // terminated. That's fine, we'll skip them. + faultExpirationEpoch := targetDeadline.Last() + FaultMaxAge + powerDelta, err = dlCurrent.RecordFaults(store, sectors, info.SectorSize, QuantSpecForDeadline(targetDeadline), faultExpirationEpoch, disputeInfo.DisputedSectors) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to declare faults") + + err = deadlinesCurrent.UpdateDeadline(store, params.Deadline, dlCurrent) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to update deadline %d", params.Deadline) + err = st.SaveDeadlines(store, deadlinesCurrent) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to save deadlines") + } + + // Penalties. + { + // // Calculate the base penalty. + // penaltyBase := PledgePenaltyForInvalidWindowPoSt( + // epochReward.ThisEpochRewardSmoothed, + // pwrTotal.QualityAdjPowerSmoothed, + // penalisedPower.QA, + // ) + + // Calculate the target reward. 
+ rewardTarget := RewardForDisputedWindowPoSt(info.WindowPoStProofType, penalisedPower) + + // // Compute the target penalty by adding the + // // base penalty to the target reward. We don't + // // take reward out of the penalty as the miner + // // could end up receiving a substantial + // // portion of their fee back as a reward. + // penaltyTarget := big.Add(penaltyBase, rewardTarget) + + err := st.ApplyPenalty(rewardTarget) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to apply penalty") + penaltyFromVesting, penaltyFromBalance, err := st.RepayPartialDebtInPriorityOrder(store, currEpoch, rt.CurrentBalance()) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to pay debt") + toBurn = big.Add(penaltyFromVesting, penaltyFromBalance) + + // Now, move as much of the target reward as + // we can from the burn to the reward. + toReward = big.Min(toBurn, rewardTarget) + toBurn = big.Sub(toBurn, toReward) + + // pledgeDelta = penaltyFromVesting.Neg() + } + }) + + requestUpdatePower(rt, big.Zero(), powerDelta) + + if !toReward.IsZero() { + // Try to send the reward to the reporter. + code := rt.Send(reporter, builtin.MethodSend, nil, toReward, &builtin.Discard{}) + + // If we fail, log and burn the reward to make sure the balances remain correct. + if !code.IsSuccess() { + rt.Log(rtt.ERROR, "failed to send reward") + toBurn = big.Add(toBurn, toReward) + } + } + burnFunds(rt, toBurn) + // notifyPledgeChanged(rt, pledgeDelta) + rt.StateReadonly(&st) + + err := st.CheckBalanceInvariants(rt.CurrentBalance()) + builtin.RequireNoErr(rt, err, ErrBalanceInvariantBroken, "balance invariants broken") + return nil +} + /////////////////////// // Sector Commitment // /////////////////////// @@ -1157,9 +1303,8 @@ type TerminateSectorsReturn struct { // AddressedPartitionsMax per epoch until the queue is empty. // // The sectors are immediately ignored for Window PoSt proofs, and should be -// masked in the same way as faulty sectors. 
A miner terminating sectors in the -// current deadline must be careful to compute an appropriate Window PoSt proof -// for the sectors that will be active at the time the PoSt is submitted. +// masked in the same way as faulty sectors. A miner may not terminate sectors in the +// current deadline or the next deadline to be proven. // // This function may be invoked with no new sectors to explicitly process the // next batch of sectors. @@ -1204,6 +1349,12 @@ func (a Actor) TerminateSectors(rt Runtime, params *TerminateSectorsParams) *Ter builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load sectors") err = toProcess.ForEach(func(dlIdx uint64, partitionSectors PartitionSectorMap) error { + // If the deadline the current or next deadline to prove, don't allow terminating sectors. + // We assume that deadlines are immutable when being proven. + if !deadlineIsMutable(st.ProvingPeriodStart, dlIdx, currEpoch) { + rt.Abortf(exitcode.ErrIllegalArgument, "cannot terminate sectors in immutable deadline %d", dlIdx) + } + quant := st.QuantSpecForDeadline(dlIdx) deadline, err := deadlines.LoadDeadline(store, dlIdx) @@ -1305,7 +1456,7 @@ func (a Actor) DeclareFaults(rt Runtime, params *DeclareFaultsParams) *abi.Empty builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load deadline %d", dlIdx) faultExpirationEpoch := targetDeadline.Last() + FaultMaxAge - deadlinePowerDelta, err := deadline.DeclareFaults(store, sectors, info.SectorSize, QuantSpecForDeadline(targetDeadline), faultExpirationEpoch, pm) + deadlinePowerDelta, err := deadline.RecordFaults(store, sectors, info.SectorSize, QuantSpecForDeadline(targetDeadline), faultExpirationEpoch, pm) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to declare faults for deadline %d", dlIdx) err = deadlines.UpdateDeadline(store, dlIdx, deadline) @@ -1441,9 +1592,9 @@ func (a Actor) CompactPartitions(rt Runtime, params *CompactPartitionsParams) *a info := getMinerInfo(rt, &st) 
rt.ValidateImmediateCallerIs(append(info.ControlAddresses, info.Owner, info.Worker)...) - if !deadlineIsMutable(st.ProvingPeriodStart, params.Deadline, rt.CurrEpoch()) { + if !deadlineAvailableForCompaction(st.ProvingPeriodStart, params.Deadline, rt.CurrEpoch()) { rt.Abortf(exitcode.ErrForbidden, - "cannot compact deadline %d during its challenge window or the prior challenge window", params.Deadline) + "cannot compact deadline %d during its challenge window, or the prior challenge window, or before %d epochs have passed since its last challenge window ended", params.Deadline, WPoStDisputeWindow) } submissionPartitionLimit := loadPartitionsSectorsMax(info.WindowPoStPartitionSectors) @@ -1762,16 +1913,17 @@ func (a Actor) ChangeCoinbase(rt Runtime, newAddress *addr.Address) *abi.EmptyVa return nil } -func (a Actor) EnsureNoPiece(rt Runtime, params *builtin.BatchPieceCIDParams) *abi.EmptyValue { +func (a Actor) StoredAny(rt Runtime, params *builtin.BatchPieceCIDParams) *cbg.CborBool { rt.ValidateImmediateCallerIs(builtin.StorageMarketActorAddr) var st State rt.StateReadonly(&st) - err := st.MustNotContainAnyPiece(adt.AsStore(rt), params.PieceCIDs) - builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "piece already exist") + exist, err := st.ContainsAnyPiece(adt.AsStore(rt), params.PieceCIDs) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to check piece existence") - return nil + out := cbg.CborBool(exist) + return &out } ////////// @@ -2120,7 +2272,7 @@ func havePendingEarlyTerminations(rt Runtime, st *State) bool { return !noEarlyTerminations } -func verifyWindowedPost(rt Runtime, challengeEpoch abi.ChainEpoch, sectors []*SectorOnChainInfo, proofs []proof.PoStProof) { +func verifyWindowedPost(rt Runtime, challengeEpoch abi.ChainEpoch, sectors []*SectorOnChainInfo, proofs []proof.PoStProof) error { minerActorID, err := addr.IDFromAddress(rt.Receiver()) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "runtime provided bad receiver 
address %v", rt.Receiver()) @@ -2149,9 +2301,11 @@ func verifyWindowedPost(rt Runtime, challengeEpoch abi.ChainEpoch, sectors []*Se } // Verify the PoSt Proof - if err = rt.VerifyPoSt(pvInfo); err != nil { - rt.Abortf(exitcode.ErrIllegalArgument, "invalid PoSt %+v: %s", pvInfo, err) + err = rt.VerifyPoSt(pvInfo) + if err != nil { + return fmt.Errorf("invalid PoSt %+v: %w", pvInfo, err) } + return nil } // SealVerifyParams is the structure of information that must be sent with a diff --git a/actors/builtin/miner/miner_state.go b/actors/builtin/miner/miner_state.go index 14c6d91d4..633a0ef70 100644 --- a/actors/builtin/miner/miner_state.go +++ b/actors/builtin/miner/miner_state.go @@ -1241,6 +1241,8 @@ func (st *State) AdvanceDeadline(store adt.Store, currEpoch abi.ChainEpoch) (*Ad // No live sectors in this deadline, nothing to do. if deadline.LiveSectors == 0 { + // We should do some more checks here. See: + // Fix: https://github.com/filecoin-project/specs-actors/issues/1348 return &AdvanceDeadlineResult{ /* pledgeDelta, */ powerDelta, @@ -1318,7 +1320,7 @@ func (st *State) AdvanceDeadline(store adt.Store, currEpoch abi.ChainEpoch) (*Ad }, nil } -// Assumes pieces are all checked before by MustNotContainAnyPiece +// Assumes pieces are all checked before by ContainsAnyPiece func (st *State) AddPieces(store adt.Store, sdi market.SectorDealInfos, sno abi.SectorNumber) error { if len(sdi.PieceCIDs) == 0 { return nil } @@ -1341,11 +1343,11 @@
return nil } -func (st *State) MustNotContainAnyPiece(store adt.Store, pieceCIDs []builtin.CheckedCID) error { +func (st *State) ContainsAnyPiece(store adt.Store, pieceCIDs []builtin.CheckedCID) (bool, error) { pieces, err := adt.AsMap(store, st.Pieces, builtin.DefaultHamtBitwidth) if err != nil { - return xerrors.Errorf("failed to load Pieces: %w", err) + return false, xerrors.Errorf("failed to load Pieces: %w", err) } for _, pieceCID := range pieceCIDs { @@ -1353,7 +1355,7 @@ func (st *State) MustNotContainAnyPiece(store adt.Store, pieceCIDs []builtin.Che found, err := pieces.Get(abi.CidKey(pieceCID.CID), &out) if err != nil { - return xerrors.Errorf("failed to get piece %s: %w", pieceCID, err) + return false, xerrors.Errorf("failed to get piece %s: %w", pieceCID, err) } if !found { continue @@ -1363,26 +1365,26 @@ func (st *State) MustNotContainAnyPiece(store adt.Store, pieceCIDs []builtin.Che sno := abi.SectorNumber(out) _, found, err = st.GetPrecommittedSector(store, sno) if err != nil { - return xerrors.Errorf("failed to get precommit %d: %s", sno, pieceCID) + return false, xerrors.Errorf("failed to get precommit %d, %s: %w", sno, pieceCID, err) } if found { - return xerrors.Errorf("piece contained in precommit %d: %s", sno, pieceCID) + return true, nil } // check exist in live sector live, err := st.isSectorLive(store, sno) if err != nil { - return xerrors.Errorf("failed to check sector live %d, %s: %w", sno, pieceCID, err) + return false, xerrors.Errorf("failed to check sector live %d, %s: %w", sno, pieceCID, err) } if live { - return xerrors.Errorf("piece in active %d: %s", sno, pieceCID) + return true, nil } // Otherwise sector already removed, but we did not remove this piece record. 
// } - return nil + return false, nil } func (st *State) isSectorLive(store adt.Store, sector abi.SectorNumber) (bool, error) { diff --git a/actors/builtin/miner/miner_test.go b/actors/builtin/miner/miner_test.go index 6504dcf4e..3ab8240f7 100644 --- a/actors/builtin/miner/miner_test.go +++ b/actors/builtin/miner/miner_test.go @@ -6,6 +6,7 @@ import ( "context" "encoding/binary" "fmt" + "strconv" "strings" "testing" @@ -139,7 +140,7 @@ func TestConstruction(t *testing.T) { rt.StoreGet(deadlines.Due[i], &deadline) assert.True(t, deadline.Partitions.Defined()) assert.True(t, deadline.ExpirationsEpochs.Defined()) - assertEmptyBitfield(t, deadline.PostSubmissions) + assertEmptyBitfield(t, deadline.PartitionsPoSted) assertEmptyBitfield(t, deadline.EarlyTerminations) assert.Equal(t, uint64(0), deadline.LiveSectors) } @@ -468,7 +469,7 @@ func TestCommitments(t *testing.T) { require.NoError(t, err) deadline, partition := actor.getDeadlineAndPartition(rt, dlIdx, pIdx) assert.Equal(t, uint64(1), deadline.LiveSectors) - assertEmptyBitfield(t, deadline.PostSubmissions) + assertEmptyBitfield(t, deadline.PartitionsPoSted) assertEmptyBitfield(t, deadline.EarlyTerminations) // quant := st.QuantSpecForDeadline(dlIdx) @@ -1125,7 +1126,7 @@ func TestCCUpgrade(t *testing.T) { require.NoError(t, err) sectorArr, err := miner.LoadSectors(rt.AdtStore(), st.Sectors) require.NoError(t, err) - newFaults, _, _, err := partition.DeclareFaults(rt.AdtStore(), sectorArr, bf(uint64(oldSectors[0].SectorNumber)), 100000, + newFaults, _, _, err := partition.RecordFaults(rt.AdtStore(), sectorArr, bf(uint64(oldSectors[0].SectorNumber)), 100000, actor.sectorSize, quant) require.NoError(t, err) assertBitfieldEquals(t, newFaults, uint64(oldSectors[0].SectorNumber)) @@ -1804,7 +1805,9 @@ func TestWindowPost(t *testing.T) { WithEpoch(precommitEpoch). 
WithBalance(bigBalance, big.Zero()) - t.Run("test proof", func(t *testing.T) { + testBasicPoSt := func(disputeSucceed bool) { + proofs := makePoStProofs(actor.windowPostProofType) + rt := builder.Build(t) actor.constructAndVerify(rt) store := rt.AdtStore() @@ -1825,17 +1828,53 @@ func TestWindowPost(t *testing.T) { partitions := []miner.PoStPartition{ {Index: pIdx, Skipped: bitfield.New()}, } - actor.submitWindowPoSt(rt, dlinfo, partitions, []*miner.SectorOnChainInfo{sector}, &poStConfig{ + actor.submitWindowPoStRaw(rt, dlinfo, partitions, []*miner.SectorOnChainInfo{sector}, proofs, &poStConfig{ expectedPowerDelta: pwr, }) // Verify proof recorded deadline := actor.getDeadline(rt, dlIdx) - assertBitfieldEquals(t, deadline.PostSubmissions, pIdx) + assertBitfieldEquals(t, deadline.PartitionsPoSted, pIdx) + + postsCid := deadline.OptimisticPoStSubmissions + + posts, err := adt.AsArray(store, postsCid, miner.DeadlineOptimisticPoStSubmissionsAmtBitwidth) + require.NoError(t, err) + require.EqualValues(t, posts.Length(), 1) + var post miner.WindowedPoSt + found, err := posts.Get(0, &post) + require.NoError(t, err) + require.True(t, found) + assertBitfieldEquals(t, post.Partitions, pIdx) // Advance to end-of-deadline cron to verify no penalties. advanceDeadline(rt, actor, &cronConfig{}) actor.checkState(rt) + + deadline = actor.getDeadline(rt, dlIdx) + + // Proofs should exist in snapshot. 
+ require.Equal(t, deadline.OptimisticPoStSubmissionsSnapshot, postsCid) + + var result *poStDisputeResult + if disputeSucceed { + // expectedFee := miner.PledgePenaltyForInvalidWindowPoSt(actor.epochRewardSmooth, actor.epochQAPowerSmooth, pwr.QA) + result = &poStDisputeResult{ + expectedPowerDelta: pwr.Neg(), + expectedPenalty: big.Zero(), + expectedReward: miner.BaseRewardForDisputedWindowPoSt, + expectedPledgeDelta: big.Zero(), + } + } + actor.disputeWindowPoSt(rt, dlinfo, 0, []*miner.SectorOnChainInfo{sector}, result) + } + + t.Run("test proof", func(t *testing.T) { + testBasicPoSt(true) + }) + + t.Run("test bad proof accepted and disputed", func(t *testing.T) { + testBasicPoSt(false) }) t.Run("test duplicate proof rejected", func(t *testing.T) { @@ -1865,7 +1904,7 @@ func TestWindowPost(t *testing.T) { // Verify proof recorded deadline := actor.getDeadline(rt, dlIdx) - assertBitfieldEquals(t, deadline.PostSubmissions, pIdx) + assertBitfieldEquals(t, deadline.PartitionsPoSted, pIdx) // Submit a duplicate proof for the same partition. This will be rejected because after ignoring the // already-proven partition, there are no sectors remaining. @@ -1943,7 +1982,7 @@ func TestWindowPost(t *testing.T) { }) // Verify proof recorded deadline := actor.getDeadline(rt, dlIdx) - assertBitfieldEquals(t, deadline.PostSubmissions, 0) + assertBitfieldEquals(t, deadline.PartitionsPoSted, 0) } { // Attempt PoSt for both partitions, thus duplicating proof for partition 0, so rejected @@ -1973,7 +2012,7 @@ func TestWindowPost(t *testing.T) { }) // Verify both proofs now recorded deadline := actor.getDeadline(rt, dlIdx) - assertBitfieldEquals(t, deadline.PostSubmissions, 0, 1) + assertBitfieldEquals(t, deadline.PartitionsPoSted, 0, 1) } // Advance to end-of-deadline cron to verify no penalties. 
@@ -2031,6 +2070,15 @@ func TestWindowPost(t *testing.T) { assertBitfieldEmpty(t, partition.Faults) assertBitfieldEmpty(t, partition.Recoveries) + // We restored power, so we should not have recorded a post. + deadline = actor.getDeadline(rt, dlIdx) + assertBitfieldEquals(t, deadline.PartitionsPoSted, pIdx) + postsCid := deadline.OptimisticPoStSubmissions + posts, err := adt.AsArray(rt.AdtStore(), postsCid, + miner.DeadlineOptimisticPoStSubmissionsAmtBitwidth) + require.NoError(t, err) + require.EqualValues(t, posts.Length(), 0) + // Next deadline cron does not charge for the fault advanceDeadline(rt, actor, &cronConfig{}) @@ -2239,6 +2287,180 @@ func TestWindowPost(t *testing.T) { }) actor.checkState(rt) }) + + t.Run("cannot dispute posts when the challenge window is open", func(t *testing.T) { + proofs := makePoStProofs(actor.windowPostProofType) + + rt := builder.Build(t) + actor.constructAndVerify(rt) + store := rt.AdtStore() + sector := actor.commitAndProveSectors(rt, 1 /* , defaultSectorExpiration */, nil)[0] + pwr := miner.PowerForSector(actor.sectorSize, sector) + + st := getState(rt) + dlIdx, pIdx, err := st.FindSector(store, sector.SectorNumber) + require.NoError(t, err) + + // Skip over deadlines until the beginning of the one with the new sector + dlinfo := actor.deadline(rt) + for dlinfo.Index != dlIdx { + dlinfo = advanceDeadline(rt, actor, &cronConfig{}) + } + + // Submit PoSt + partitions := []miner.PoStPartition{ + {Index: pIdx, Skipped: bitfield.New()}, + } + actor.submitWindowPoStRaw(rt, dlinfo, partitions, []*miner.SectorOnChainInfo{sector}, proofs, &poStConfig{ + expectedPowerDelta: pwr, + }) + + // Dispute it. + params := miner.DisputeWindowedPoStParams{ + Deadline: dlinfo.Index, + PoStIndex: 0, + } + + rt.SetCaller(actor.worker, builtin.AccountActorCodeID) + rt.ExpectValidateCallerType(builtin.CallerTypesSignable...) 
+ + // expectQueryNetworkInfo(rt, actor) + + rt.ExpectAbortContainsMessage(exitcode.ErrForbidden, "can only dispute window posts during the dispute window", func() { + rt.Call(actor.a.DisputeWindowedPoSt, ¶ms) + }) + rt.Verify() + }) + t.Run("can dispute up till window end, but not after", func(t *testing.T) { + rt := builder.Build(t) + actor.constructAndVerify(rt) + store := rt.AdtStore() + sector := actor.commitAndProveSectors(rt, 1 /* , defaultSectorExpiration */, nil)[0] + + st := getState(rt) + dlIdx, _, err := st.FindSector(store, sector.SectorNumber) + require.NoError(t, err) + + nextDl := miner.NewDeadlineInfo(st.ProvingPeriodStart, dlIdx, rt.Epoch()). + NextNotElapsed() + + advanceAndSubmitPoSts(rt, actor, sector) + + windowEnd := nextDl.Close + miner.WPoStDisputeWindow + + // first, try to dispute right before the window end. + // We expect this to fail "normally" (fail to disprove). + rt.SetEpoch(windowEnd - 1) + actor.disputeWindowPoSt(rt, nextDl, 0, []*miner.SectorOnChainInfo{sector}, nil) + + // Now set the epoch at the window end. We expect a different error. + rt.SetEpoch(windowEnd) + + // Now try to dispute. + params := miner.DisputeWindowedPoStParams{ + Deadline: dlIdx, + PoStIndex: 0, + } + + rt.SetCaller(actor.worker, builtin.AccountActorCodeID) + rt.ExpectValidateCallerType(builtin.CallerTypesSignable...) 
+ + // currentReward := reward.ThisEpochRewardReturn{ + // ThisEpochBaselinePower: actor.baselinePower, + // ThisEpochRewardSmoothed: actor.epochRewardSmooth, + // } + // rt.ExpectSend(builtin.RewardActorAddr, builtin.MethodsReward.ThisEpochReward, nil, big.Zero(), ¤tReward, exitcode.Ok) + + // networkPower := big.NewIntUnsigned(1 << 50) + // rt.ExpectSend(builtin.StoragePowerActorAddr, builtin.MethodsPower.CurrentTotalPower, nil, big.Zero(), + // &power.CurrentTotalPowerReturn{ + // RawBytePower: networkPower, + // QualityAdjPower: networkPower, + // PledgeCollateral: actor.networkPledge, + // QualityAdjPowerSmoothed: actor.epochQAPowerSmooth, + // }, + // exitcode.Ok) + + rt.ExpectAbortContainsMessage(exitcode.ErrForbidden, "can only dispute window posts during the dispute window", func() { + rt.Call(actor.a.DisputeWindowedPoSt, ¶ms) + }) + rt.Verify() + }) + + t.Run("can't dispute up with an invalid deadline", func(t *testing.T) { + rt := builder.Build(t) + actor.constructAndVerify(rt) + + params := miner.DisputeWindowedPoStParams{ + Deadline: 336, + PoStIndex: 0, + } + + rt.SetCaller(actor.worker, builtin.AccountActorCodeID) + rt.ExpectValidateCallerType(builtin.CallerTypesSignable...) + + rt.ExpectAbortContainsMessage(exitcode.ErrIllegalArgument, "invalid deadline", func() { + rt.Call(actor.a.DisputeWindowedPoSt, ¶ms) + }) + rt.Verify() + }) + + t.Run("can dispute test after proving period changes", func(t *testing.T) { + rt := builder.Build(t) + actor.constructAndVerify(rt) + + periodStart := actor.deadline(rt).NextPeriodStart() + + // go to the next deadline 0 + rt.SetEpoch(periodStart) + + // fill one partition in each mutable deadline. + numSectors := int(actor.partitionSize * (miner.WPoStPeriodDeadlines - 2)) + + // creates a partition in every deadline except 0 and 47 + sectors := actor.commitAndProveSectors(rt, numSectors /* , defaultSectorExpiration */, nil) + actor.t.Log("here") + + // prove every sector once to activate power. 
This + // simplifies the test a bit. + advanceAndSubmitPoSts(rt, actor, sectors...) + target := miner.WPoStPeriodDeadlines - 2 + // Make sure we're in the correct deadline. We should + // finish at deadline 2 because precommit takes some + // time.(at 0 in epik) + dlinfo := actor.deadline(rt) + require.True(t, dlinfo.Index < target, + "we need to be before the target deadline for this test to make sense") + + // Now challenge find the sectors in the last partition. + _, partition := actor.getDeadlineAndPartition(rt, target, 0) + var targetSectors []*miner.SectorOnChainInfo + err := partition.Sectors.ForEach(func(i uint64) error { + for _, sector := range sectors { + if uint64(sector.SectorNumber) == i { + targetSectors = append(targetSectors, sector) + } + } + return nil + }) + require.NoError(t, err) + require.NotEmpty(t, targetSectors) + + pwr := miner.PowerForSectors(actor.sectorSize, targetSectors) + + // And challenge the last partition. + var result *poStDisputeResult + // expectedFee := miner.PledgePenaltyForInvalidWindowPoSt(actor.epochRewardSmooth, actor.epochQAPowerSmooth, pwr.QA) + result = &poStDisputeResult{ + expectedPowerDelta: pwr.Neg(), + expectedPenalty: big.Zero(), // expectedFee, + expectedReward: miner.BaseRewardForDisputedWindowPoSt, + expectedPledgeDelta: big.Zero(), + } + + targetDlInfo := miner.NewDeadlineInfo(periodStart, target, rt.Epoch()) + actor.disputeWindowPoSt(rt, targetDlInfo, 0, targetSectors, result) + }) } func TestProveCommit(t *testing.T) { @@ -2380,8 +2602,9 @@ func TestDeadlineCron(t *testing.T) { // setup state to simulate moving forward all the way to expiry dlIdx, _, err := st.FindSector(rt.AdtStore(), sectors[0].SectorNumber) require.NoError(t, err) - expirationPeriod := (expiration/miner.WPoStProvingPeriod + 1) * miner.WPoStProvingPeriod - st.ProvingPeriodStart = expirationPeriod + remainingEpochs := expiration - st.ProvingPeriodStart + remainingPeriods := remainingEpochs/miner.WPoStProvingPeriod + 1 + 
st.ProvingPeriodStart += remainingPeriods * miner.WPoStProvingPeriod st.CurrentDeadline = dlIdx rt.ReplaceState(st) @@ -2411,8 +2634,9 @@ func TestDeadlineCron(t *testing.T) { // setup state to simulate moving forward all the way to expiry dlIdx, _, err := st.FindSector(rt.AdtStore(), sectors[0].SectorNumber) require.NoError(t, err) - expirationPeriod := (expiration/miner.WPoStProvingPeriod + 1) * miner.WPoStProvingPeriod - st.ProvingPeriodStart = expirationPeriod + remainingEpochs := expiration - st.ProvingPeriodStart + remainingPeriods := remainingEpochs/miner.WPoStProvingPeriod + 1 + st.ProvingPeriodStart += remainingPeriods * miner.WPoStProvingPeriod st.CurrentDeadline = dlIdx rt.ReplaceState(st) @@ -3149,6 +3373,38 @@ func TestTerminateSectors(t *testing.T) { actor.terminateSectors(rt, sectors, expectedFee, big.Zero()) actor.checkState(rt) }) */ + + t.Run("cannot terminate a sector when the challenge window is open", func(t *testing.T) { + rt := builder.Build(t) + actor.constructAndVerify(rt) + rt.SetEpoch(abi.ChainEpoch(1)) + sectorInfo := actor.commitAndProveSectors(rt, 1 /* , defaultSectorExpiration */, nil) + sector := sectorInfo[0] + + st := getState(rt) + dlIdx, pIdx, err := st.FindSector(rt.AdtStore(), sector.SectorNumber) + require.NoError(t, err) + + // advance into the deadline, but not past it. + dlinfo := actor.deadline(rt) + for dlinfo.Index != dlIdx { + dlinfo = advanceDeadline(rt, actor, &cronConfig{}) + } + + params := &miner.TerminateSectorsParams{Terminations: []miner.TerminationDeclaration{{ + Deadline: dlIdx, + Partition: pIdx, + Sectors: bf(uint64(sector.SectorNumber)), + }}} + rt.SetCaller(actor.worker, builtin.AccountActorCodeID) + rt.ExpectValidateCallerAddr(append(actor.controlAddrs, actor.owner, actor.worker)...) 
+ rt.ExpectAbortContainsMessage(exitcode.ErrIllegalArgument, "cannot terminate sectors in immutable deadline", func() { + rt.Call(actor.a.TerminateSectors, params) + }) + + actor.checkState(rt) + }) + } func TestWithdrawBalance(t *testing.T) { @@ -3333,15 +3589,15 @@ func TestCompactPartitions(t *testing.T) { actor.constructAndVerify(rt) rt.SetEpoch(200) - // create 4 sectors in partition 0 - info := actor.commitAndProveSectors(rt, 4, [][]abi.DealID{{10}, {20}, {30}, {40}}) + // create 2 sectors in partition 0 + info := actor.commitAndProveSectors(rt, 2, [][]abi.DealID{{10}, {20} /* , {30}, {40} */}) advanceAndSubmitPoSts(rt, actor, info...) // prove and activate power. sector1 := info[0].SectorNumber sector2 := info[1].SectorNumber - sector3 := info[2].SectorNumber - sector4 := info[3].SectorNumber + // sector3 := info[2].SectorNumber + // sector4 := info[3].SectorNumber // terminate sector1 rt.SetEpoch(rt.Epoch() + 100) @@ -3359,6 +3615,9 @@ func TestCompactPartitions(t *testing.T) { sectors := bitfield.NewFromSet([]uint64{uint64(sector1)}) actor.terminateSectors(rt, sectors /* expectedFee */, big.Zero()) + // Wait WPoStProofChallengePeriod epochs so we can compact the sector. + advanceToEpochWithCron(rt, actor, rt.Epoch()+miner.WPoStDisputeWindow) + // compacting partition will remove sector1 but retain sector 2, 3 and 4. 
st := getState(rt) deadlineId, partId, err := st.FindSector(rt.AdtStore(), sector1) @@ -3369,10 +3628,10 @@ func TestCompactPartitions(t *testing.T) { st = getState(rt) assertSectorExists(rt.AdtStore(), st, sector2, partId, deadlineId) - deadlineId, partId, err = st.FindSector(rt.AdtStore(), sector3) - assert.NoError(t, err) - assertSectorExists(rt.AdtStore(), st, sector3, partId, deadlineId) - assertSectorExists(rt.AdtStore(), st, sector4, partId, deadlineId) + // deadlineId, partId, err = st.FindSector(rt.AdtStore(), sector3) + // assert.NoError(t, err) + // assertSectorExists(rt.AdtStore(), st, sector3, partId, deadlineId) + // assertSectorExists(rt.AdtStore(), st, sector4, partId, deadlineId) assertSectorNotFound(rt.AdtStore(), st, sector1) actor.checkState(rt) @@ -3390,12 +3649,21 @@ func TestCompactPartitions(t *testing.T) { // fault sector1 actor.declareFaults(rt, info[0]) - st := getState(rt) - deadlineId, partId, err := st.FindSector(rt.AdtStore(), info[0].SectorNumber) - assert.NoError(t, err) + // TODO: current codes + { + // st := getState(rt) + // deadlineId, partId, err := st.FindSector(rt.AdtStore(), info[0].SectorNumber) + // assert.NoError(t, err) + + // substr := fmt.Sprintf("failed to remove partitions from deadline %d: while removing partitions: cannot remove partition %d: has faults", deadlineId, partId) + // rt.ExpectAbortContainsMessage(exitcode.ErrIllegalArgument, substr, func() { + // Wait WPoStProofChallengePeriod epochs so we can compact the sector. 
+ } + advanceToEpochWithCron(rt, actor, rt.Epoch()+miner.WPoStDisputeWindow) - substr := fmt.Sprintf("failed to remove partitions from deadline %d: while removing partitions: cannot remove partition %d: has faults", deadlineId, partId) - rt.ExpectAbortContainsMessage(exitcode.ErrIllegalArgument, substr, func() { + partId := uint64(0) + deadlineId := uint64(0) + rt.ExpectAbortContainsMessage(exitcode.ErrIllegalArgument, "failed to remove partitions from deadline 0: while removing partitions: cannot remove partition 0: has faults", func() { partitions := bitfield.NewFromSet([]uint64{partId}) actor.compactPartitions(rt, deadlineId, partitions) }) @@ -3406,7 +3674,13 @@ func TestCompactPartitions(t *testing.T) { rt := builder.Build(t) actor.constructAndVerify(rt) - rt.SetEpoch(200) + // Wait until deadline 0 (the one to which we'll assign the + // sector) has elapsed. That'll let us commit, prove, then wait + // finality epochs. + st := getState(rt) + deadlineEpoch := miner.NewDeadlineInfo(st.ProvingPeriodStart, 0, rt.Epoch()).NextNotElapsed().NextOpen() + rt.SetEpoch(deadlineEpoch) + // create 2 sectors in partition 0 info := actor.commitAndProveSectors(rt, 2, [][]abi.DealID{{10}, {20}}) @@ -3414,8 +3688,12 @@ func TestCompactPartitions(t *testing.T) { deadlineId, partId, err := st.FindSector(rt.AdtStore(), info[0].SectorNumber) assert.NoError(t, err) - substr := fmt.Sprintf("failed to remove partitions from deadline %d: while removing partitions: cannot remove partition %d: has unproven sectors", deadlineId, partId) - rt.ExpectAbortContainsMessage(exitcode.ErrIllegalArgument, substr, func() { + // Wait WPoStProofChallengePeriod epochs so we can compact the sector. 
+ advanceToEpochWithCron(rt, actor, rt.Epoch()+miner.WPoStDisputeWindow) + + partId := uint64(0) + deadlineId := uint64(0) + rt.ExpectAbortContainsMessage(exitcode.ErrIllegalArgument, "failed to remove partitions from deadline 0: while removing partitions: cannot remove partition 0: has unproven sectors", func() { partitions := bitfield.NewFromSet([]uint64{partId}) actor.compactPartitions(rt, deadlineId, partitions) }) @@ -3426,24 +3704,75 @@ func TestCompactPartitions(t *testing.T) { rt := builder.Build(t) actor.constructAndVerify(rt) - rt.ExpectAbort(exitcode.ErrIllegalArgument, func() { + rt.ExpectAbortContainsMessage(exitcode.ErrIllegalArgument, "invalid deadline "+strconv.Itoa(int(miner.WPoStPeriodDeadlines)), func() { actor.compactPartitions(rt, miner.WPoStPeriodDeadlines, bitfield.New()) }) actor.checkState(rt) }) - t.Run("fails if deadline is not mutable", func(t *testing.T) { + t.Run("fails if deadline is open for challenging", func(t *testing.T) { rt := builder.Build(t) actor.constructAndVerify(rt) - epoch := abi.ChainEpoch(200) - rt.SetEpoch(epoch) + rt.SetEpoch(periodOffset) + rt.ExpectAbort(exitcode.ErrForbidden, func() { + actor.compactPartitions(rt, 0, bitfield.New()) + }) + actor.checkState(rt) + }) + + t.Run("fails if deadline is next up to be challenged", func(t *testing.T) { + rt := builder.Build(t) + actor.constructAndVerify(rt) + + rt.SetEpoch(periodOffset) rt.ExpectAbort(exitcode.ErrForbidden, func() { actor.compactPartitions(rt, 1, bitfield.New()) }) actor.checkState(rt) }) + t.Run("the deadline after the next deadline should still be open for compaction", func(t *testing.T) { + rt := builder.Build(t) + actor.constructAndVerify(rt) + + rt.SetEpoch(periodOffset) + actor.compactPartitions(rt, 3, bitfield.New()) + actor.checkState(rt) + }) + + t.Run("deadlines challenged last proving period should still be in the dispute window", func(t *testing.T) { + rt := builder.Build(t) + actor.constructAndVerify(rt) + + rt.SetEpoch(periodOffset) + 
rt.ExpectAbort(exitcode.ErrForbidden, func() { + actor.compactPartitions(rt, miner.WPoStPeriodDeadlines-1, bitfield.New()) + }) + actor.checkState(rt) + }) + + disputeEnd := periodOffset + miner.WPoStChallengeWindow + miner.WPoStDisputeWindow - 1 + t.Run("compaction should be forbidden during the dispute window", func(t *testing.T) { + rt := builder.Build(t) + actor.constructAndVerify(rt) + + rt.SetEpoch(disputeEnd) + rt.ExpectAbort(exitcode.ErrForbidden, func() { + actor.compactPartitions(rt, 0, bitfield.New()) + }) + actor.checkState(rt) + }) + + t.Run("compaction should be allowed following the dispute window", func(t *testing.T) { + rt := builder.Build(t) + actor.constructAndVerify(rt) + + rt.SetEpoch(disputeEnd + 1) + actor.compactPartitions(rt, 0, bitfield.New()) + actor.checkState(rt) + }) + t.Run("fails if partition count is above limit", func(t *testing.T) { rt := builder.Build(t) actor.constructAndVerify(rt) @@ -4799,6 +5128,22 @@ func (h *actorHarness) getPartition(rt *mock.Runtime, deadline *miner.Deadline, return partition } +func (h *actorHarness) getPartitionSnapshot(rt *mock.Runtime, deadline *miner.Deadline, idx uint64) *miner.Partition { + partition, err := deadline.LoadPartitionSnapshot(rt.AdtStore(), idx) + require.NoError(h.t, err) + return partition +} + +func (h *actorHarness) getSubmittedProof(rt *mock.Runtime, deadline *miner.Deadline, idx uint64) *miner.WindowedPoSt { + proofs, err := adt.AsArray(rt.AdtStore(), deadline.OptimisticPoStSubmissionsSnapshot, miner.DeadlineOptimisticPoStSubmissionsAmtBitwidth) + require.NoError(h.t, err) + var post miner.WindowedPoSt + found, err := proofs.Get(idx, &post) + require.NoError(h.t, err) + require.True(h.t, found) + return &post +} + func (h *actorHarness) getDeadlineAndPartition(rt *mock.Runtime, dlIdx, pIdx uint64) (*miner.Deadline, *miner.Partition) { deadline := h.getDeadline(rt, dlIdx) partition := h.getPartition(rt, deadline, pIdx) @@ -5309,31 +5654,163 @@ func (h *actorHarness) 
advancePastDeadlineEndWithCron(rt *mock.Runtime) { rt.SetEpoch(deadline.NextPeriodStart()) } +type poStDisputeResult struct { + expectedPowerDelta miner.PowerPair + expectedPledgeDelta abi.TokenAmount + expectedPenalty abi.TokenAmount + expectedReward abi.TokenAmount +} + +func (h *actorHarness) disputeWindowPoSt(rt *mock.Runtime, deadline *dline.Info, proofIndex uint64, infos []*miner.SectorOnChainInfo, expectSuccess *poStDisputeResult) { + rt.SetCaller(h.worker, builtin.AccountActorCodeID) + rt.ExpectValidateCallerType(builtin.CallerTypesSignable...) + + // expectQueryNetworkInfo(rt, h) + challengeRand := abi.SealRandomness([]byte{10, 11, 12, 13}) + + // only sectors that are not skipped and not existing non-recovered faults will be verified + allIgnored := bf() + dln := h.getDeadline(rt, deadline.Index) + + post := h.getSubmittedProof(rt, dln, proofIndex) + + var err error + err = post.Partitions.ForEach(func(idx uint64) error { + partition := h.getPartitionSnapshot(rt, dln, idx) + allIgnored, err = bitfield.MergeBitFields(allIgnored, partition.Faults) + require.NoError(h.t, err) + noRecoveries, err := partition.Recoveries.IsEmpty() + require.NoError(h.t, err) + require.True(h.t, noRecoveries) + return nil + }) + require.NoError(h.t, err) + + // find the first non-faulty, non-skipped sector in poSt to replace all faulty sectors. 
+ var goodInfo *miner.SectorOnChainInfo + for _, ci := range infos { + contains, err := allIgnored.IsSet(uint64(ci.SectorNumber)) + require.NoError(h.t, err) + if !contains { + goodInfo = ci + break + } + } + require.NotNil(h.t, goodInfo, "stored proof should prove at least one sector") + + var buf bytes.Buffer + receiver := rt.Receiver() + err = receiver.MarshalCBOR(&buf) + require.NoError(h.t, err) + + rt.ExpectGetRandomnessBeacon(crypto.DomainSeparationTag_WindowedPoStChallengeSeed, deadline.Challenge, buf.Bytes(), abi.Randomness(challengeRand)) + + actorId, err := addr.IDFromAddress(h.receiver) + require.NoError(h.t, err) + + proofInfos := make([]proof.SectorInfo, len(infos)) + for i, ci := range infos { + si := ci + contains, err := allIgnored.IsSet(uint64(ci.SectorNumber)) + require.NoError(h.t, err) + if contains { + si = goodInfo + } + proofInfos[i] = proof.SectorInfo{ + SealProof: si.SealProof, + SectorNumber: si.SectorNumber, + SealedCID: si.SealedCID, + } + } + + vi := proof.WindowPoStVerifyInfo{ + Randomness: abi.PoStRandomness(challengeRand), + Proofs: post.Proofs, + ChallengedSectors: proofInfos, + Prover: abi.ActorID(actorId), + } + var verifResult error + if expectSuccess != nil { + // if we succeed at challenging, proof verification needs to fail. 
+ verifResult = fmt.Errorf("invalid post") + } + rt.ExpectVerifyPoSt(vi, verifResult) + + if expectSuccess != nil { + // expect power update + if !expectSuccess.expectedPowerDelta.IsZero() { + claim := &power.UpdateClaimedPowerParams{ + RawByteDelta: expectSuccess.expectedPowerDelta.Raw, + QualityAdjustedDelta: expectSuccess.expectedPowerDelta.QA, + } + rt.ExpectSend(builtin.StoragePowerActorAddr, builtin.MethodsPower.UpdateClaimedPower, claim, abi.NewTokenAmount(0), + nil, exitcode.Ok) + } + // expect reward + if !expectSuccess.expectedReward.IsZero() { + rt.ExpectSend(h.worker, builtin.MethodSend, nil, expectSuccess.expectedReward, nil, exitcode.Ok) + } + // expect penalty + if !expectSuccess.expectedPenalty.IsZero() { + rt.ExpectSend(builtin.BurntFundsActorAddr, builtin.MethodSend, nil, expectSuccess.expectedPenalty, nil, exitcode.Ok) + } + // expect pledge update + if !expectSuccess.expectedPledgeDelta.IsZero() { + rt.ExpectSend(builtin.StoragePowerActorAddr, builtin.MethodsPower.UpdatePledgeTotal, + &expectSuccess.expectedPledgeDelta, abi.NewTokenAmount(0), nil, exitcode.Ok) + } + } + + params := miner.DisputeWindowedPoStParams{ + Deadline: deadline.Index, + PoStIndex: proofIndex, + } + if expectSuccess == nil { + rt.ExpectAbortContainsMessage(exitcode.ErrIllegalArgument, "failed to dispute valid post", func() { + rt.Call(h.a.DisputeWindowedPoSt, ¶ms) + }) + } else { + rt.Call(h.a.DisputeWindowedPoSt, ¶ms) + } + rt.Verify() +} + type poStConfig struct { expectedPowerDelta miner.PowerPair verificationError error } func (h *actorHarness) submitWindowPoSt(rt *mock.Runtime, deadline *dline.Info, partitions []miner.PoStPartition, infos []*miner.SectorOnChainInfo, poStCfg *poStConfig) { + h.submitWindowPoStRaw(rt, deadline, partitions, infos, makePoStProofs(h.windowPostProofType), poStCfg) +} + +func (h *actorHarness) submitWindowPoStRaw(rt *mock.Runtime, deadline *dline.Info, partitions []miner.PoStPartition, infos []*miner.SectorOnChainInfo, proofs 
[]proof.PoStProof, poStCfg *poStConfig) { rt.SetCaller(h.worker, builtin.AccountActorCodeID) commitRand := abi.Randomness("chaincommitment") rt.ExpectGetRandomnessTickets(crypto.DomainSeparationTag_PoStChainCommit, deadline.Challenge, nil, commitRand) rt.ExpectValidateCallerAddr(append(h.controlAddrs, h.owner, h.worker)...) - proofs := makePoStProofs(h.windowPostProofType) challengeRand := abi.SealRandomness([]byte{10, 11, 12, 13}) // only sectors that are not skipped and not existing non-recovered faults will be verified allIgnored := bf() + allRecovered := bf() dln := h.getDeadline(rt, deadline.Index) + for _, p := range partitions { partition := h.getPartition(rt, dln, p.Index) expectedFaults, err := bitfield.SubtractBitField(partition.Faults, partition.Recoveries) require.NoError(h.t, err) allIgnored, err = bitfield.MultiMerge(allIgnored, expectedFaults, p.Skipped) require.NoError(h.t, err) + recovered, err := bitfield.SubtractBitField(partition.Recoveries, p.Skipped) + require.NoError(h.t, err) + allRecovered, err = bitfield.MergeBitFields(allRecovered, recovered) + require.NoError(h.t, err) } + optimistic, err := allRecovered.IsEmpty() + require.NoError(h.t, err) // find the first non-faulty, non-skipped sector in poSt to replace all faulty sectors. 
var goodInfo *miner.SectorOnChainInfo @@ -5347,7 +5824,7 @@ func (h *actorHarness) submitWindowPoSt(rt *mock.Runtime, deadline *dline.Info, } // goodInfo == nil indicates all the sectors have been skipped and should PoSt verification should not occur - if goodInfo != nil { + if !optimistic && goodInfo != nil { var buf bytes.Buffer receiver := rt.Receiver() err := receiver.MarshalCBOR(&buf) @@ -5387,6 +5864,7 @@ func (h *actorHarness) submitWindowPoSt(rt *mock.Runtime, deadline *dline.Info, } rt.ExpectVerifyPoSt(vi, verifResult) } + if poStCfg != nil { // expect power update if !poStCfg.expectedPowerDelta.IsZero() { diff --git a/actors/builtin/miner/monies.go b/actors/builtin/miner/monies.go index 1ac333472..6f2b94d57 100644 --- a/actors/builtin/miner/monies.go +++ b/actors/builtin/miner/monies.go @@ -42,6 +42,9 @@ var ContinuedFaultProjectionPeriod = abi.ChainEpoch((builtin.EpochsInDay * Conti var TerminationPenaltyLowerBoundProjectionPeriod = abi.ChainEpoch((builtin.EpochsInDay * 35) / 10) // PARAM_SPEC +// FF + 2BR +var InvalidWindowPoStProjectionPeriod = abi.ChainEpoch(ContinuedFaultProjectionPeriod + 2*builtin.EpochsInDay) // PARAM_SPEC + // Fraction of assumed block reward penalized when a sector is terminated. var TerminationRewardFactor = builtin.BigFrac{ // PARAM_SPEC Numerator: big.NewInt(1), @@ -54,6 +57,11 @@ const TerminationLifetimeCap = 140 // PARAM_SPEC */ // Multiplier of whole per-winner rewards for a consensus fault penalty. const ConsensusFaultFactor = 5 +// Base reward for successfully disputing a window post's proofs. +var BaseRewardForDisputedWindowPoSt = big.Mul(big.NewInt(4), builtin.TokenPrecision) // PARAM_SPEC +// // Base penalty for a successful disputed window post proof.
+// var BasePenaltyForDisputedWindowPoSt = big.Mul(big.NewInt(20), builtin.TokenPrecision) // PARAM_SPEC + /* // // Fraction of total reward (block reward + gas reward) to be locked up as of V6 // var LockedRewardFactorNum = big.NewInt(75) // var LockedRewardFactorDenom = big.NewInt(100) @@ -117,8 +125,15 @@ func PledgePenaltyForTermination(dayReward abi.TokenAmount, sectorAge abi.ChainE big.Mul(big.NewInt(builtin.EpochsInDay), TerminationRewardFactor.Denominator)))) // (epochs*AttoFIL/day -> AttoFIL) } -// no PreCommit deposit required - // Computes the PreCommit deposit given sector qa weight and current network conditions. +// The penalty for optimistically proving a sector with an invalid window PoSt. +func PledgePenaltyForInvalidWindowPoSt(rewardEstimate, networkQAPowerEstimate smoothing.FilterEstimate, qaSectorPower abi.StoragePower) abi.TokenAmount { + return big.Add( + ExpectedRewardForPower(rewardEstimate, networkQAPowerEstimate, qaSectorPower, InvalidWindowPoStProjectionPeriod), + BasePenaltyForDisputedWindowPoSt, + ) +} + +// Computes the PreCommit deposit given sector qa weight and current network conditions. // PreCommit Deposit = BR(PreCommitDepositProjectionPeriod) func PreCommitDepositForPower(rewardEstimate, networkQAPowerEstimate smoothing.FilterEstimate, qaSectorPower abi.StoragePower) abi.TokenAmount { return ExpectedRewardForPower(rewardEstimate, networkQAPowerEstimate, qaSectorPower, PreCommitDepositProjectionPeriod) diff --git a/actors/builtin/miner/partition_state.go b/actors/builtin/miner/partition_state.go index 23e337f98..112a18843 100644 --- a/actors/builtin/miner/partition_state.go +++ b/actors/builtin/miner/partition_state.go @@ -236,7 +236,7 @@ func (p *Partition) addFaults( // - The sectors' expirations are rescheduled to the fault expiration epoch, as "early" (if not expiring earlier). // // Returns the power of the now-faulty sectors. 
-func (p *Partition) DeclareFaults( +func (p *Partition) RecordFaults( store adt.Store, sectors Sectors, sectorNos bitfield.BitField, faultExpirationEpoch abi.ChainEpoch, ssize abi.SectorSize, quant QuantSpec, ) (newFaults bitfield.BitField, powerDelta, newFaultyPower PowerPair, err error) { diff --git a/actors/builtin/miner/partition_state_test.go b/actors/builtin/miner/partition_state_test.go index b8e78d2e8..1c13daed9 100644 --- a/actors/builtin/miner/partition_state_test.go +++ b/actors/builtin/miner/partition_state_test.go @@ -89,7 +89,7 @@ func TestPartitions(t *testing.T) { sectorArr := sectorsArr(t, store, sectors) faultSet := bf(4, 5) - _, powerDelta, newFaultyPower, err := partition.DeclareFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) + _, powerDelta, newFaultyPower, err := partition.RecordFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) require.NoError(t, err) expectedFaultyPower := miner.PowerForSectors(sectorSize, selectSectors(t, sectors, faultSet)) @@ -124,7 +124,7 @@ func TestPartitions(t *testing.T) { sectorArr := sectorsArr(t, store, sectors) faultSet := bf(4, 5) - _, powerDelta, newFaultyPower, err := partition.DeclareFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) + _, powerDelta, newFaultyPower, err := partition.RecordFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) require.NoError(t, err) expectedFaultyPower := miner.PowerForSectors(sectorSize, selectSectors(t, sectors, faultSet)) @@ -132,7 +132,7 @@ func TestPartitions(t *testing.T) { assert.True(t, powerDelta.Equals(expectedFaultyPower.Neg())) faultSet = bf(5, 6) - newFaults, powerDelta, newFaultyPower, err := partition.DeclareFaults(store, sectorArr, faultSet, abi.ChainEpoch(3), sectorSize, quantSpec) + newFaults, powerDelta, newFaultyPower, err := partition.RecordFaults(store, sectorArr, faultSet, abi.ChainEpoch(3), sectorSize, quantSpec) require.NoError(t, err) 
assertBitfieldEquals(t, newFaults, 6) expectedFaultyPower = miner.PowerForSectors(sectorSize, selectSectors(t, sectors, bf(6))) @@ -154,7 +154,7 @@ func TestPartitions(t *testing.T) { sectorArr := sectorsArr(t, store, sectors) faultSet := bf(99) - _, _, _, err := partition.DeclareFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) + _, _, _, err := partition.RecordFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) require.Error(t, err) assert.Contains(t, err.Error(), "not all sectors are assigned to the partition") }) @@ -165,7 +165,7 @@ func TestPartitions(t *testing.T) { // make 4, 5 and 6 faulty faultSet := bf(4, 5, 6) - _, _, _, err := partition.DeclareFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) + _, _, _, err := partition.RecordFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) require.NoError(t, err) // add 4 and 5 as recoveries @@ -182,7 +182,7 @@ func TestPartitions(t *testing.T) { // make 4, 5 and 6 faulty faultSet := bf(4, 5, 6) - _, _, _, err := partition.DeclareFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) + _, _, _, err := partition.RecordFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) require.NoError(t, err) // add 4 and 5 as recoveries @@ -191,14 +191,14 @@ func TestPartitions(t *testing.T) { require.NoError(t, err) // declaring no faults doesn't do anything. - newFaults, _, _, err := partition.DeclareFaults(store, sectorArr, bf(), abi.ChainEpoch(7), sectorSize, quantSpec) + newFaults, _, _, err := partition.RecordFaults(store, sectorArr, bf(), abi.ChainEpoch(7), sectorSize, quantSpec) require.NoError(t, err) assertBitfieldEmpty(t, newFaults) // no new faults. 
assertPartitionState(t, store, partition, quantSpec, sectorSize, sectors, bf(1, 2, 3, 4, 5, 6), bf(4, 5, 6), bf(4, 5), bf(), bf()) // removing sector 5 alters recovery set and recovery power - newFaults, _, _, err = partition.DeclareFaults(store, sectorArr, bf(5), abi.ChainEpoch(10), sectorSize, quantSpec) + newFaults, _, _, err = partition.RecordFaults(store, sectorArr, bf(5), abi.ChainEpoch(10), sectorSize, quantSpec) require.NoError(t, err) assertBitfieldEmpty(t, newFaults) // these faults aren't new. @@ -211,7 +211,7 @@ func TestPartitions(t *testing.T) { // make 4, 5 and 6 faulty faultSet := bf(4, 5, 6) - _, _, _, err := partition.DeclareFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) + _, _, _, err := partition.RecordFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) require.NoError(t, err) // add 4 and 5 as recoveries @@ -243,7 +243,7 @@ func TestPartitions(t *testing.T) { // make 4, 5 and 6 faulty faultSet := bf(4, 5, 6) - _, _, _, err := partition.DeclareFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) + _, _, _, err := partition.RecordFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) require.NoError(t, err) // add 3, 4 and 5 as recoveries. 3 is not faulty so it's skipped @@ -282,7 +282,7 @@ func TestPartitions(t *testing.T) { // Mark sector 2 faulty, we should skip it when rescheduling faultSet := bf(2) - _, _, _, err := partition.DeclareFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) + _, _, _, err := partition.RecordFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) require.NoError(t, err) // Add an unproven sector. We _should_ reschedule the expiration. 
@@ -365,7 +365,7 @@ func TestPartitions(t *testing.T) { // fault sector 2 faultSet := bf(2) - _, _, _, err := partition.DeclareFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) + _, _, _, err := partition.RecordFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) require.NoError(t, err) // remove 3 sectors starting with 2 @@ -415,7 +415,7 @@ func TestPartitions(t *testing.T) { // fault sector 3, 4, 5 and 6 faultSet := bf(3, 4, 5, 6) - _, _, _, err = partition.DeclareFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) + _, _, _, err = partition.RecordFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) require.NoError(t, err) // mark 4and 5 as a recoveries @@ -501,7 +501,7 @@ func TestPartitions(t *testing.T) { require.NoError(t, err) // Fault declaration for terminated sectors fails. - newFaults, _, _, err := partition.DeclareFaults(store, sectorArr, terminations, abi.ChainEpoch(5), sectorSize, quantSpec) + newFaults, _, _, err := partition.RecordFaults(store, sectorArr, terminations, abi.ChainEpoch(5), sectorSize, quantSpec) require.NoError(t, err) empty, err := newFaults.IsEmpty() require.NoError(t, err) @@ -514,7 +514,7 @@ func TestPartitions(t *testing.T) { // add one fault with an early termination faultSet := bf(4) - _, _, _, err := partition.DeclareFaults(store, sectorArr, faultSet, abi.ChainEpoch(2), sectorSize, quantSpec) + _, _, _, err := partition.RecordFaults(store, sectorArr, faultSet, abi.ChainEpoch(2), sectorSize, quantSpec) require.NoError(t, err) // pop first expiration set @@ -552,7 +552,7 @@ func TestPartitions(t *testing.T) { store, partition := setup(t) sectorArr := sectorsArr(t, store, sectors) - _, _, _, err := partition.DeclareFaults(store, sectorArr, bf(5), abi.ChainEpoch(2), sectorSize, quantSpec) + _, _, _, err := partition.RecordFaults(store, sectorArr, bf(5), abi.ChainEpoch(2), sectorSize, quantSpec) require.NoError(t, err) // add a 
recovery @@ -594,7 +594,7 @@ func TestPartitions(t *testing.T) { // make 4, 5 and 6 faulty faultSet := bf(4, 5, 6) - _, _, _, err = partition.DeclareFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) + _, _, _, err = partition.RecordFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) require.NoError(t, err) // add 4 and 5 as recoveries @@ -633,7 +633,7 @@ func TestPartitions(t *testing.T) { // fault sector 3, 4, 5 and 6 faultSet := bf(3, 4, 5, 6) - _, _, _, err := partition.DeclareFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) + _, _, _, err := partition.RecordFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) require.NoError(t, err) // mark 4and 5 as a recoveries @@ -787,7 +787,7 @@ func TestRecordSkippedFaults(t *testing.T) { // declare 4 & 5 as faulty faultSet := bf(4, 5) - _, _, _, err = partition.DeclareFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) + _, _, _, err = partition.RecordFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) require.NoError(t, err) assertPartitionState(t, store, partition, quantSpec, sectorSize, sectors, bf(1, 2, 3, 4, 5, 6), faultSet, bf(), terminations, bf()) @@ -810,7 +810,7 @@ func TestRecordSkippedFaults(t *testing.T) { // make 4, 5 and 6 faulty faultSet := bf(4, 5, 6) - _, _, _, err := partition.DeclareFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) + _, _, _, err := partition.RecordFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) require.NoError(t, err) // add 4 and 5 as recoveries diff --git a/actors/builtin/miner/policy.go b/actors/builtin/miner/policy.go index 2ea2be07d..6b1a77eed 100644 --- a/actors/builtin/miner/policy.go +++ b/actors/builtin/miner/policy.go @@ -40,6 +40,31 @@ func init() { if abi.ChainEpoch(WPoStPeriodDeadlines)*WPoStChallengeWindow != WPoStProvingPeriod { panic(fmt.Sprintf("incompatible 
proving period %d and challenge window %d", WPoStProvingPeriod, WPoStChallengeWindow)) } + + // Check to make sure the dispute window is longer than finality so there's always some time to dispute bad proofs. + if WPoStDisputeWindow <= ChainFinality { + panic(fmt.Sprintf("the proof dispute period %d must exceed finality %d", WPoStDisputeWindow, ChainFinality)) + } + + // A deadline becomes immutable one challenge window before its challenge window opens. + // The challenge lookback must fall within this immutability period. + if WPoStChallengeLookback > WPoStChallengeWindow { + panic("the challenge lookback cannot exceed one challenge window") + } + + // Deadlines are immutable when the challenge window is open, and during + // the previous challenge window. + immutableWindow := 2 * WPoStChallengeWindow + + // We want to reserve at least one deadline's worth of time to compact a + // deadline. + minCompactionWindow := WPoStChallengeWindow + + // Make sure we have enough time in the proving period to do everything we need. + if (minCompactionWindow + immutableWindow + WPoStDisputeWindow) > WPoStProvingPeriod { + panic(fmt.Sprintf("together, the minimum compaction window (%d) immutability window (%d) and the dispute window (%d) exceed the proving period (%d)", + minCompactionWindow, immutableWindow, WPoStDisputeWindow, WPoStProvingPeriod)) + } } // The maximum number of partitions that can be loaded in a single invocation. @@ -67,6 +92,9 @@ const ( // Maximum size of a single prove-commit proof, in bytes. const MaxProveCommitSize = 10240 +// Maximum size of a single window PoSt proof, in bytes (the expected size is 192). +const MaxPoStProofSize = 1024 + // Maximum number of control addresses a miner may register. const MaxControlAddresses = 10 @@ -167,7 +195,12 @@ const DealWinIncentiveMultiplier = 2 // for permissioned actor methods and winning block elections.
const ConsensusFaultIneligibilityDuration = ChainFinality -/* // DealWeight and VerifiedDealWeight are spacetime occupied by regular deals and verified deals in a sector. +// WPoStDisputeWindow is the period after a challenge window ends during which +// PoSts submitted during that period may be disputed. +const WPoStDisputeWindow = 2 * ChainFinality // PARAM_TODO + +/* +// DealWeight and VerifiedDealWeight are spacetime occupied by regular deals and verified deals in a sector. // Sum of DealWeight and VerifiedDealWeight should be less than or equal to total SpaceTime of a sector. // Sectors full of VerifiedDeals will have a SectorQuality of VerifiedDealWeightMultiplier/QualityBaseMultiplier. // Sectors full of Deals will have a SectorQuality of DealWeightMultiplier/QualityBaseMultiplier. @@ -309,3 +342,9 @@ func RewardForConsensusSlashReport(elapsedEpoch abi.ChainEpoch, collateral abi.T return big.Min(big.Div(num, denom), big.Div(big.Mul(collateral, consensusFaultMaxReporterShare.Numerator), consensusFaultMaxReporterShare.Denominator)) } + +// The reward given for successfully disputing a window post. +func RewardForDisputedWindowPoSt(proofType abi.RegisteredPoStProof, disputedPower PowerPair) abi.TokenAmount { + // This is currently just the base. In the future, the fee may scale based on the disputed power. 
+ return BaseRewardForDisputedWindowPoSt +} diff --git a/actors/builtin/miner/testing.go b/actors/builtin/miner/testing.go index a5bb58b7f..cacf78c70 100644 --- a/actors/builtin/miner/testing.go +++ b/actors/builtin/miner/testing.go @@ -201,14 +201,46 @@ func CheckDeadlineStateInvariants(deadline *Deadline, store adt.Store, quant Qua }) acc.RequireNoError(err, "error iterating partitions") - // Check PoSt submissions - if postSubmissions, err := deadline.PostSubmissions.All(1 << 20); err != nil { - acc.Addf("error expanding post submissions: %v", err) - } else { - for _, p := range postSubmissions { - acc.Require(p <= partitionCount, "invalid PoSt submission for partition %d of %d", p, partitionCount) + // Check partitions snapshot to make sure we take the snapshot after + // dealing with recovering power and unproven power. + partitionsSnapshot, err := deadline.PartitionsSnapshotArray(store) + acc.RequireNoError(err, "error loading partitions snapshot") + err = partitionsSnapshot.ForEach(&partition, func(i int64) error { + acc := acc.WithPrefix("partition snapshot %d: ", i) // Shadow + + acc.Require(partition.RecoveringPower.IsZero(), "snapshot partition has recovering power") + if noRecoveries, err := partition.Recoveries.IsEmpty(); err != nil { + acc.Addf("error counting recoveries: %v", err) + } else { + acc.Require(noRecoveries, "snapshot partition has pending recoveries") } - } + + acc.Require(partition.UnprovenPower.IsZero(), "snapshot partition has unproven power") + if noUnproven, err := partition.Unproven.IsEmpty(); err != nil { + acc.Addf("error counting unproven: %v", err) + } else { + acc.Require(noUnproven, "snapshot partition has unproven sectors") + } + + return nil + }) + acc.RequireNoError(err, "error iterating partitions snapshot") + + // Check that we don't have any proofs proving partitions that are not in the snapshot. 
+ proofsSnapshot, err := deadline.OptimisticProofsSnapshotArray(store) + acc.RequireNoError(err, "error loading proofs snapshot") + var proof WindowedPoSt + err = proofsSnapshot.ForEach(&proof, func(_ int64) error { + err = proof.Partitions.ForEach(func(i uint64) error { + found, err := partitionsSnapshot.Get(i, &partition) + acc.RequireNoError(err, "error loading partition snapshot") + acc.Require(found, "failed to find partition for recorded proof in the snapshot") + return nil + }) + acc.RequireNoError(err, "error iterating proof partitions bitfield") + return nil + }) + acc.RequireNoError(err, "error iterating proofs snapshot") // Check memoized sector and power values. live, err := bitfield.MultiMerge(allLiveSectors...) diff --git a/actors/builtin/shared.go b/actors/builtin/shared.go index 9141c0888..5bf96bffb 100644 --- a/actors/builtin/shared.go +++ b/actors/builtin/shared.go @@ -11,6 +11,7 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/exitcode" "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" "github.com/filecoin-project/specs-actors/v2/actors/runtime" ) @@ -197,12 +198,13 @@ type CheckedCID struct { CID cid.Cid `checked:"true"` } -func EnsureMinerNoPieces(rt runtime.Runtime, maddr address.Address, pieceCids []CheckedCID) error { - code := rt.Send(maddr, MethodsMiner.EnsureNoPiece, &BatchPieceCIDParams{PieceCIDs: pieceCids}, abi.NewTokenAmount(0), &Discard{}) +func CheckMinerStoredAnyPiece(rt runtime.Runtime, maddr address.Address, pieceCids []CheckedCID) (bool, error) { + var out cbg.CborBool + code := rt.Send(maddr, MethodsMiner.StoredAny, &BatchPieceCIDParams{PieceCIDs: pieceCids}, abi.NewTokenAmount(0), &out) if !code.IsSuccess() { - return code.Wrapf("failed to check miner has no pieces %s", maddr) + return false, code.Wrapf("failed to check if miner %s stored %v", maddr, pieceCids) } - return nil + return bool(out), nil } // Changed since v0: diff --git 
a/actors/migration/nv9/miner.go b/actors/migration/nv9/miner.go index d39dc043d..824c502cb 100644 --- a/actors/migration/nv9/miner.go +++ b/actors/migration/nv9/miner.go @@ -125,6 +125,12 @@ func (m *minerMigrator) migrateDeadlines(ctx context.Context, store cbor.IpldSto outDeadlines := miner3.Deadlines{Due: [miner3.WPoStPeriodDeadlines]cid.Cid{}} + // Start from an empty template to zero-initialize new fields. + deadlineTemplate, err := miner3.ConstructDeadline(adt3.WrapStore(ctx, store)) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to construct new deadline template") + } + for i, c := range inDeadlines.Due { outDlCid, err := cache.Load(DeadlineKey(c), func() (cid.Cid, error) { var inDeadline miner2.Deadline @@ -142,15 +148,14 @@ func (m *minerMigrator) migrateDeadlines(ctx context.Context, store cbor.IpldSto return cid.Undef, xerrors.Errorf("bitfield queue: %w", err) } - outDeadline := miner3.Deadline{ - Partitions: partitions, - ExpirationsEpochs: expirationEpochs, - PostSubmissions: inDeadline.PostSubmissions, - EarlyTerminations: inDeadline.EarlyTerminations, - LiveSectors: inDeadline.LiveSectors, - TotalSectors: inDeadline.TotalSectors, - FaultyPower: miner3.PowerPair(inDeadline.FaultyPower), - } + outDeadline := *deadlineTemplate + outDeadline.Partitions = partitions + outDeadline.ExpirationsEpochs = expirationEpochs + outDeadline.PartitionsPoSted = inDeadline.PostSubmissions + outDeadline.EarlyTerminations = inDeadline.EarlyTerminations + outDeadline.LiveSectors = inDeadline.LiveSectors + outDeadline.TotalSectors = inDeadline.TotalSectors + outDeadline.FaultyPower = miner3.PowerPair(inDeadline.FaultyPower) return store.Put(ctx, &outDeadline) }) diff --git a/gen/gen.go b/gen/gen.go index cb590777e..22c24d6ba 100644 --- a/gen/gen.go +++ b/gen/gen.go @@ -243,6 +243,7 @@ func main() { miner.WorkerKeyChange{}, miner.VestingFunds{}, miner.VestingFund{}, + miner.WindowedPoSt{}, // method params and returns // miner.ConstructorParams{}, // in power 
actor miner.SubmitWindowedPoStParams{}, @@ -263,6 +264,7 @@ func main() { miner.CompactSectorNumbersParams{}, miner.CronEventPayload{}, miner.WithdrawPledgeParams{}, + miner.DisputeWindowedPoStParams{}, // other types miner.FaultDeclaration{}, miner.RecoveryDeclaration{},