From dc247cc57d0ac7ea11a1be145040288cdd7736dc Mon Sep 17 00:00:00 2001
From: Shrenuj Bansal
Date: Tue, 4 Oct 2022 18:33:18 +0000
Subject: [PATCH 1/3] Add ability to only have single partition per msg for
 partitions with recovery sectors

---
 node/config/types.go              | 16 +++++--
 storage/wdpost/wdpost_run.go      | 44 +++++++++++++-------
 storage/wdpost/wdpost_run_test.go | 69 +++++++++++++++++++++++++++++++
 storage/wdpost/wdpost_sched.go    | 50 +++++++++++-----------
 4 files changed, 136 insertions(+), 43 deletions(-)

diff --git a/node/config/types.go b/node/config/types.go
index 1f55da1c6eb..52b5e655495 100644
--- a/node/config/types.go
+++ b/node/config/types.go
@@ -276,11 +276,9 @@ type ProvingConfig struct {
 	// A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
 	//
 	// The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16, which
-	// means that a single message can prove at most 10 partinions
+	// means that a single message can prove at most 10 partitions
 	//
-	// In some cases when submitting PoSt messages which are recovering sectors, the default network limit may still be
-	// too high to fit in the block gas limit; In those cases it may be necessary to set this value to something lower
-	// than 10; Note that setting this value lower may result in less efficient gas use - more messages will be sent,
+	// Note that setting this value lower may result in less efficient gas use - more messages will be sent,
 	// to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
 	//
 	// Setting this value above the network limit has no effect
@@ -294,6 +292,16 @@ type ProvingConfig struct {
 	// Note that setting this value lower may result in less efficient gas use - more messages will be sent than needed,
 	// resulting in more total gas use (but each message will have lower gas limit)
 	MaxPartitionsPerRecoveryMessage int
+
+	// Enable single partition per Post Message for partitions containing recovery sectors
+	//
+	// In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be
+	// too high to fit in the block gas limit. In those cases, it becomes useful to only house the single partition
+	// with recovery sectors in the post message
+	//
+	// Note that setting this option may result in less efficient gas use - more messages will be sent,
+	// to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
+	SingleRecoveringPartitionPerPostMessage bool
 }
 
 type SealingConfig struct {
diff --git a/storage/wdpost/wdpost_run.go b/storage/wdpost/wdpost_run.go
index 3640eafe59f..f34cf7c0c3d 100644
--- a/storage/wdpost/wdpost_run.go
+++ b/storage/wdpost/wdpost_run.go
@@ -286,7 +286,7 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, manual bool, di
 
 	// Split partitions into batches, so as not to exceed the number of sectors
 	// allowed in a single message
-	partitionBatches, err := s.batchPartitions(partitions, nv)
+	partitionBatches, err := s.BatchPartitions(partitions, nv)
 	if err != nil {
 		return nil, err
 	}
@@ -492,7 +492,9 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, manual bool, di
 	return posts, nil
 }
 
-func (s *WindowPoStScheduler) batchPartitions(partitions []api.Partition, nv network.Version) ([][]api.Partition, error) {
+// Note: partition order within batches must match the original partition order
+// so that the code consuming these batches downstream works correctly
+func (s *WindowPoStScheduler) BatchPartitions(partitions []api.Partition, nv network.Version) ([][]api.Partition, error) {
 	// We don't want to exceed the number of sectors allowed in a message.
 	// So given the number of sectors in a partition, work out the number of
 	// partitions that can be in a message without exceeding sectors per
@@ -524,21 +526,33 @@ func (s *WindowPoStScheduler) batchPartitions(partitions []api.Partition, nv net
 		}
 	}
 
-	// The number of messages will be:
-	// ceiling(number of partitions / partitions per message)
-	batchCount := len(partitions) / partitionsPerMsg
-	if len(partitions)%partitionsPerMsg != 0 {
-		batchCount++
-	}
+	batches := [][]api.Partition{}
+
+	currBatch := []api.Partition{}
+	for i := 0; i < len(partitions); i++ {
+		recSectors, err := partitions[i].RecoveringSectors.Count()
+		if err != nil {
+			return nil, err
+		}
 
-	// Split the partitions into batches
-	batches := make([][]api.Partition, 0, batchCount)
-	for i := 0; i < len(partitions); i += partitionsPerMsg {
-		end := i + partitionsPerMsg
-		if end > len(partitions) {
-			end = len(partitions)
+		// Give a partition a batch of its own if it contains recovery sectors
+		// and the SingleRecoveringPartitionPerPostMessage config option is set
+		if s.singleRecoveringPartitionPerPostMessage && recSectors > 0 {
+			if len(currBatch) > 0 {
+				batches = append(batches, currBatch)
+				currBatch = []api.Partition{}
+			}
+			batches = append(batches, []api.Partition{partitions[i]})
+		} else {
+			if len(currBatch) >= partitionsPerMsg {
+				batches = append(batches, currBatch)
+				currBatch = []api.Partition{}
+			}
+			currBatch = append(currBatch, partitions[i])
 		}
-		batches = append(batches, partitions[i:end])
+	}
+	if len(currBatch) > 0 {
+		batches = append(batches, currBatch)
 	}
 
 	return batches, nil
diff --git a/storage/wdpost/wdpost_run_test.go b/storage/wdpost/wdpost_run_test.go
index cb110cc0d05..f5578d1ba6e 100644
--- a/storage/wdpost/wdpost_run_test.go
+++ b/storage/wdpost/wdpost_run_test.go
@@ -177,6 +177,26 @@ func (m mockFaultTracker) CheckProvable(ctx context.Context, pp abi.RegisteredPo
 	return map[abi.SectorID]string{}, nil
 }
 
+func generatePartition(sectorCount uint64, recoverySectorCount uint64) api.Partition {
+	var partition api.Partition
+	sectors := bitfield.New()
+	recoverySectors := bitfield.New()
+	for s := uint64(0); s < sectorCount; s++ {
+		sectors.Set(s)
+	}
+	for s := uint64(0); s < recoverySectorCount; s++ {
+		recoverySectors.Set(s)
+	}
+	partition = api.Partition{
+		AllSectors:        sectors,
+		FaultySectors:     bitfield.New(),
+		RecoveringSectors: recoverySectors,
+		LiveSectors:       sectors,
+		ActiveSectors:     sectors,
+	}
+	return partition
+}
+
 // TestWDPostDoPost verifies that doPost will send the correct number of window
 // PoST messages for a given number of partitions
 func TestWDPostDoPost(t *testing.T) {
@@ -368,6 +388,55 @@ func TestWDPostDoPostPartLimitConfig(t *testing.T) {
 	}
 }
 
+// TestBatchPartitionsRecoverySectors tests that batches containing recovery sectors
+// hold only a single partition while keeping all the partitions in order
+func TestBatchPartitionsRecoverySectors(t *testing.T) {
+
+	proofType := abi.RegisteredPoStProof_StackedDrgWindow2KiBV1
+	postAct := tutils.NewIDAddr(t, 100)
+
+	mockStgMinerAPI := newMockStorageMinerAPI()
+
+	userPartLimit := 4
+
+	scheduler := &WindowPoStScheduler{
+		api:          mockStgMinerAPI,
+		prover:       &mockProver{},
+		verifier:     &mockVerif{},
+		faultTracker: &mockFaultTracker{},
+		proofType:    proofType,
+		actor:        postAct,
+		journal:      journal.NilJournal(),
+		addrSel:      &ctladdr.AddressSelector{},
+
+		maxPartitionsPerPostMessage:             userPartLimit,
+		singleRecoveringPartitionPerPostMessage: true,
+	}
+
+	var partitions []api.Partition
+	for p := 0; p < 4; p++ {
+		partitions = append(partitions, generatePartition(100, 0))
+	}
+	for p := 0; p < 2; p++ {
+		partitions = append(partitions, generatePartition(100, 10))
+	}
+	for p := 0; p < 6; p++ {
+		partitions = append(partitions, generatePartition(100, 0))
+	}
+	partitions = append(partitions, generatePartition(100, 10))
+
+	expectedBatchLens := []int{4, 1, 1, 4, 2, 1}
+
+	batches, err := scheduler.BatchPartitions(partitions, network.Version16)
+	require.NoError(t, err)
+
+	require.Equal(t, len(batches), 6)
+
+	for i, batch := range batches {
+		require.Equal(t, len(batch), expectedBatchLens[i])
+	}
+}
+
 // TestWDPostDeclareRecoveriesPartLimitConfig verifies that declareRecoveries will send the correct number of
 // DeclareFaultsRecovered messages for a given number of partitions based on user config
 func TestWDPostDeclareRecoveriesPartLimitConfig(t *testing.T) {
diff --git a/storage/wdpost/wdpost_sched.go b/storage/wdpost/wdpost_sched.go
index 21dfecb60ae..29c39ad9e44 100644
--- a/storage/wdpost/wdpost_sched.go
+++ b/storage/wdpost/wdpost_sched.go
@@ -64,18 +64,19 @@ type NodeAPI interface {
 
 // WindowPoStScheduler watches the chain though the changeHandler, which in turn
 // turn calls the scheduler when the time arrives to do work.
 type WindowPoStScheduler struct {
-	api                             NodeAPI
-	feeCfg                          config.MinerFeeConfig
-	addrSel                         *ctladdr.AddressSelector
-	prover                          storiface.ProverPoSt
-	verifier                        storiface.Verifier
-	faultTracker                    sealer.FaultTracker
-	proofType                       abi.RegisteredPoStProof
-	partitionSectors                uint64
-	disablePreChecks                bool
-	maxPartitionsPerPostMessage     int
-	maxPartitionsPerRecoveryMessage int
-	ch                              *changeHandler
+	api                                     NodeAPI
+	feeCfg                                  config.MinerFeeConfig
+	addrSel                                 *ctladdr.AddressSelector
+	prover                                  storiface.ProverPoSt
+	verifier                                storiface.Verifier
+	faultTracker                            sealer.FaultTracker
+	proofType                               abi.RegisteredPoStProof
+	partitionSectors                        uint64
+	disablePreChecks                        bool
+	maxPartitionsPerPostMessage             int
+	maxPartitionsPerRecoveryMessage         int
+	singleRecoveringPartitionPerPostMessage bool
+	ch                                      *changeHandler
 
 	actor address.Address
 
@@ -102,18 +103,19 @@ func NewWindowedPoStScheduler(api NodeAPI,
 	}
 
 	return &WindowPoStScheduler{
-		api:                             api,
-		feeCfg:                          cfg,
-		addrSel:                         as,
-		prover:                          sp,
-		verifier:                        verif,
-		faultTracker:                    ft,
-		proofType:                       mi.WindowPoStProofType,
-		partitionSectors:                mi.WindowPoStPartitionSectors,
-		disablePreChecks:                pcfg.DisableWDPoStPreChecks,
-		maxPartitionsPerPostMessage:     pcfg.MaxPartitionsPerPoStMessage,
-		maxPartitionsPerRecoveryMessage: pcfg.MaxPartitionsPerRecoveryMessage,
-		actor:                           actor,
+		api:                                     api,
+		feeCfg:                                  cfg,
+		addrSel:                                 as,
+		prover:                                  sp,
+		verifier:                                verif,
+		faultTracker:                            ft,
+		proofType:                               mi.WindowPoStProofType,
+		partitionSectors:                        mi.WindowPoStPartitionSectors,
+		disablePreChecks:                        pcfg.DisableWDPoStPreChecks,
+		maxPartitionsPerPostMessage:             pcfg.MaxPartitionsPerPoStMessage,
+		maxPartitionsPerRecoveryMessage:         pcfg.MaxPartitionsPerRecoveryMessage,
+		singleRecoveringPartitionPerPostMessage: pcfg.SingleRecoveringPartitionPerPostMessage,
+		actor:                                   actor,
 		evtTypes: [...]journal.EventType{
 			evtTypeWdPoStScheduler: j.RegisterEventType("wdpost", "scheduler"),
 			evtTypeWdPoStProofs:    j.RegisterEventType("wdpost", "proofs_processed"),

From 96ddd756f81a9879af51175d5c04c2309205cd39 Mon Sep 17 00:00:00 2001
From: Shrenuj Bansal
Date: Tue, 4 Oct 2022 18:44:00 +0000
Subject: [PATCH 2/3] doc gen

---
 .../en/default-lotus-miner-config.toml | 19 +++++++++++++++----
 node/config/doc_gen.go                 | 19 +++++++++++++++----
 2 files changed, 30 insertions(+), 8 deletions(-)

diff --git a/documentation/en/default-lotus-miner-config.toml b/documentation/en/default-lotus-miner-config.toml
index 0147f979e4d..b16c0e8fda1 100644
--- a/documentation/en/default-lotus-miner-config.toml
+++ b/documentation/en/default-lotus-miner-config.toml
@@ -380,11 +380,9 @@
 # A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
 #
 # The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16, which
-# means that a single message can prove at most 10 partinions
+# means that a single message can prove at most 10 partitions
 #
-# In some cases when submitting PoSt messages which are recovering sectors, the default network limit may still be
-# too high to fit in the block gas limit; In those cases it may be necessary to set this value to something lower
-# than 10; Note that setting this value lower may result in less efficient gas use - more messages will be sent,
+# Note that setting this value lower may result in less efficient gas use - more messages will be sent,
 # to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
 #
 # Setting this value above the network limit has no effect
@@ -403,6 +401,19 @@
 # env var: LOTUS_PROVING_MAXPARTITIONSPERRECOVERYMESSAGE
 #MaxPartitionsPerRecoveryMessage = 0
 
+# Enable single partition per Post Message for partitions containing recovery sectors
+#
+# In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be
+# too high to fit in the block gas limit. In those cases, it becomes useful to only house the single partition
+# with recovery sectors in the post message
+#
+# Note that setting this option may result in less efficient gas use - more messages will be sent,
+# to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
+#
+# type: bool
+# env var: LOTUS_PROVING_SINGLERECOVERINGPARTITIONPERPOSTMESSAGE
+#SingleRecoveringPartitionPerPostMessage = false
+
 [Sealing]
 
 # Upper bound on how many sectors can be waiting for more deals to be packed in it before it begins sealing at any given time.
diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go
index 20b175ccb8b..fe805407a41 100644
--- a/node/config/doc_gen.go
+++ b/node/config/doc_gen.go
@@ -698,11 +698,9 @@ After changing this option, confirm that the new value works in your setup by in
 A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
 
 The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16, which
-means that a single message can prove at most 10 partinions
+means that a single message can prove at most 10 partitions
 
-In some cases when submitting PoSt messages which are recovering sectors, the default network limit may still be
-too high to fit in the block gas limit; In those cases it may be necessary to set this value to something lower
-than 10; Note that setting this value lower may result in less efficient gas use - more messages will be sent,
+Note that setting this value lower may result in less efficient gas use - more messages will be sent,
 to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
 
 Setting this value above the network limit has no effect`,
@@ -717,6 +715,19 @@ In those cases it may be necessary to set this value to something low (eg 1);
 Note that setting this value lower may result in less efficient gas use - more messages will be sent than needed,
 resulting in more total gas use (but each message will have lower gas limit)`,
 		},
+		{
+			Name: "SingleRecoveringPartitionPerPostMessage",
+			Type: "bool",
+
+			Comment: `Enable single partition per Post Message for partitions containing recovery sectors
+
+In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be
+too high to fit in the block gas limit. In those cases, it becomes useful to only house the single partition
+with recovery sectors in the post message
+
+Note that setting this option may result in less efficient gas use - more messages will be sent,
+to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)`,
+		},
 	},
 	"Pubsub": []DocField{
 		{

From 9653584d73c35e0f41657c3ba3111598dd3b6dbc Mon Sep 17 00:00:00 2001
From: Shrenuj Bansal
Date: Tue, 4 Oct 2022 19:21:55 +0000
Subject: [PATCH 3/3] Address comments

---
 documentation/en/default-lotus-miner-config.toml | 4 ++--
 node/config/doc_gen.go                           | 4 ++--
 node/config/types.go                             | 4 ++--
 storage/wdpost/wdpost_run.go                     | 8 ++++----
 4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/documentation/en/default-lotus-miner-config.toml b/documentation/en/default-lotus-miner-config.toml
index b16c0e8fda1..b51e325e885 100644
--- a/documentation/en/default-lotus-miner-config.toml
+++ b/documentation/en/default-lotus-miner-config.toml
@@ -401,11 +401,11 @@
 # env var: LOTUS_PROVING_MAXPARTITIONSPERRECOVERYMESSAGE
 #MaxPartitionsPerRecoveryMessage = 0
 
-# Enable single partition per Post Message for partitions containing recovery sectors
+# Enable single partition per PoSt Message for partitions containing recovery sectors
 #
 # In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be
 # too high to fit in the block gas limit. In those cases, it becomes useful to only house the single partition
-# with recovery sectors in the post message
+# with recovering sectors in the post message
 #
 # Note that setting this option may result in less efficient gas use - more messages will be sent,
 # to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go
index fe805407a41..80538c70e40 100644
--- a/node/config/doc_gen.go
+++ b/node/config/doc_gen.go
@@ -719,11 +719,11 @@ resulting in more total gas use (but each message will have lower gas limit)`,
 			Name: "SingleRecoveringPartitionPerPostMessage",
 			Type: "bool",
 
-			Comment: `Enable single partition per Post Message for partitions containing recovery sectors
+			Comment: `Enable single partition per PoSt Message for partitions containing recovery sectors
 
 In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be
 too high to fit in the block gas limit. In those cases, it becomes useful to only house the single partition
-with recovery sectors in the post message
+with recovering sectors in the post message
 
 Note that setting this option may result in less efficient gas use - more messages will be sent,
 to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)`,
diff --git a/node/config/types.go b/node/config/types.go
index 52b5e655495..df5f018e3c7 100644
--- a/node/config/types.go
+++ b/node/config/types.go
@@ -293,11 +293,11 @@ type ProvingConfig struct {
 	// resulting in more total gas use (but each message will have lower gas limit)
 	MaxPartitionsPerRecoveryMessage int
 
-	// Enable single partition per Post Message for partitions containing recovery sectors
+	// Enable single partition per PoSt Message for partitions containing recovery sectors
 	//
 	// In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be
 	// too high to fit in the block gas limit. In those cases, it becomes useful to only house the single partition
-	// with recovery sectors in the post message
+	// with recovering sectors in the post message
 	//
 	// Note that setting this option may result in less efficient gas use - more messages will be sent,
 	// to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
diff --git a/storage/wdpost/wdpost_run.go b/storage/wdpost/wdpost_run.go
index f34cf7c0c3d..53b7d55c9bb 100644
--- a/storage/wdpost/wdpost_run.go
+++ b/storage/wdpost/wdpost_run.go
@@ -529,8 +529,8 @@ func (s *WindowPoStScheduler) BatchPartitions(partitions []api.Partition, nv net
 	batches := [][]api.Partition{}
 
 	currBatch := []api.Partition{}
-	for i := 0; i < len(partitions); i++ {
-		recSectors, err := partitions[i].RecoveringSectors.Count()
+	for _, partition := range partitions {
+		recSectors, err := partition.RecoveringSectors.Count()
 		if err != nil {
 			return nil, err
 		}
@@ -542,13 +542,13 @@ func (s *WindowPoStScheduler) BatchPartitions(partitions []api.Partition, nv net
 				batches = append(batches, currBatch)
 				currBatch = []api.Partition{}
 			}
-			batches = append(batches, []api.Partition{partitions[i]})
+			batches = append(batches, []api.Partition{partition})
 		} else {
 			if len(currBatch) >= partitionsPerMsg {
 				batches = append(batches, currBatch)
 				currBatch = []api.Partition{}
 			}
-			currBatch = append(currBatch, partitions[i])
+			currBatch = append(currBatch, partition)
 		}
 	}
 	if len(currBatch) > 0 {
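
The core of this series is the batching rule in BatchPartitions: walk the partitions in their original order; when SingleRecoveringPartitionPerPostMessage is set and a partition has any recovering sectors, flush the batch currently being built and emit that partition as a batch of exactly one; otherwise pack partitions up to the per-message limit. The self-contained Go sketch below restates that rule outside the scheduler so it can be run and checked against the batch sizes the new test expects. The partition struct, its recovering field, and the main driver are illustrative stand-ins invented for this sketch, not part of the patch; the real code operates on api.Partition inside WindowPoStScheduler and counts RecoveringSectors via a bitfield.

package main

import "fmt"

// partition is an illustrative stand-in for api.Partition; recovering plays
// the role of RecoveringSectors.Count().
type partition struct {
	id         int
	recovering uint64
}

// batchPartitions mirrors the patched loop: when singleRecovering is set, a
// partition with recovering sectors first flushes the batch being built, then
// goes out as a batch of exactly one; all other partitions are packed up to
// partitionsPerMsg per batch. Original partition order is preserved throughout.
func batchPartitions(parts []partition, partitionsPerMsg int, singleRecovering bool) [][]partition {
	batches := [][]partition{}
	currBatch := []partition{}
	for _, p := range parts {
		if singleRecovering && p.recovering > 0 {
			if len(currBatch) > 0 {
				batches = append(batches, currBatch)
				currBatch = []partition{}
			}
			batches = append(batches, []partition{p})
		} else {
			if len(currBatch) >= partitionsPerMsg {
				batches = append(batches, currBatch)
				currBatch = []partition{}
			}
			currBatch = append(currBatch, p)
		}
	}
	if len(currBatch) > 0 {
		batches = append(batches, currBatch)
	}
	return batches
}

func main() {
	// Same layout as TestBatchPartitionsRecoverySectors: partitions 4, 5 and 12
	// are recovering, the rest are healthy, and the user limit is 4.
	var parts []partition
	for i := 0; i < 13; i++ {
		p := partition{id: i}
		if i == 4 || i == 5 || i == 12 {
			p.recovering = 10
		}
		parts = append(parts, p)
	}
	for i, b := range batchPartitions(parts, 4, true) {
		fmt.Printf("batch %d: %d partition(s)\n", i, len(b))
	}
	// Prints batch sizes 4, 1, 1, 4, 2, 1 - the expectedBatchLens in the test.
}

The flush-before-emit step is what keeps the original ordering intact, which the note on BatchPartitions calls out as a requirement for the downstream code that consumes the batches.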