storagefsm: Fix batch deal packing behavior #6041

Merged · 6 commits · May 31, 2021
Changes from all commits
195 changes: 130 additions & 65 deletions api/test/deals.go
@@ -8,6 +8,7 @@ import (
"math/rand"
"os"
"path/filepath"
"sort"
"testing"
"time"

@@ -18,6 +19,7 @@ import (

"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
@@ -51,7 +53,7 @@ func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, sta
}

func MakeDeal(t *testing.T, ctx context.Context, rseed int, client api.FullNode, miner TestStorageNode, carExport, fastRet bool, startEpoch abi.ChainEpoch) {
res, data, err := CreateClientFile(ctx, client, rseed)
res, data, err := CreateClientFile(ctx, client, rseed, 0)
if err != nil {
t.Fatal(err)
}
@@ -63,7 +65,7 @@ func MakeDeal(t *testing.T, ctx context.Context, rseed int, client api.FullNode,

// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
waitDealSealed(t, ctx, miner, client, deal, false)
waitDealSealed(t, ctx, miner, client, deal, false, false, nil)

// Retrieval
info, err := client.ClientGetDealInfo(ctx, *deal)
@@ -72,8 +74,11 @@ func MakeDeal(t *testing.T, ctx context.Context, rseed int, client api.FullNode,
testRetrieval(t, ctx, client, fcid, &info.PieceCID, carExport, data)
}

func CreateClientFile(ctx context.Context, client api.FullNode, rseed int) (*api.ImportRes, []byte, error) {
data := make([]byte, 1600)
func CreateClientFile(ctx context.Context, client api.FullNode, rseed, size int) (*api.ImportRes, []byte, error) {
if size == 0 {
size = 1600
}
data := make([]byte, size)
rand.New(rand.NewSource(int64(rseed))).Read(data)

dir, err := ioutil.TempDir(os.TempDir(), "test-make-deal-")
@@ -119,7 +124,7 @@ func TestPublishDealsBatching(t *testing.T, b APIBuilder, blocktime time.Duratio

// Starts a deal and waits until it's published
runDealTillPublish := func(rseed int) {
res, _, err := CreateClientFile(s.ctx, s.client, rseed)
res, _, err := CreateClientFile(s.ctx, s.client, rseed, 0)
require.NoError(t, err)

upds, err := client.ClientGetDealUpdates(s.ctx)
@@ -186,68 +191,109 @@ func TestPublishDealsBatching(t *testing.T, b APIBuilder, blocktime time.Duratio
}

func TestBatchDealInput(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
publishPeriod := 10 * time.Second
maxDealsPerMsg := uint64(4)
run := func(piece, deals, expectSectors int) func(t *testing.T) {
return func(t *testing.T) {
publishPeriod := 10 * time.Second
maxDealsPerMsg := uint64(deals)

// Set max deals per publish deals message to maxDealsPerMsg
minerDef := []StorageMiner{{
Full: 0,
Opts: node.Options(
node.Override(
new(*storageadapter.DealPublisher),
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
Period: publishPeriod,
MaxDealsPerMsg: maxDealsPerMsg,
})),
node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) {
return func() (sealiface.Config, error) {
return sealiface.Config{
MaxWaitDealsSectors: 2,
MaxSealingSectors: 1,
MaxSealingSectorsForDeals: 3,
AlwaysKeepUnsealedCopy: true,
WaitDealsDelay: time.Hour,
}, nil
}, nil
}),
),
Preseal: PresealGenesis,
}}

// Create a connect client and miner node
n, sn := b(t, OneFull, minerDef)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
s := connectAndStartMining(t, b, blocktime, client, miner)
defer s.blockMiner.Stop()

err := miner.MarketSetAsk(s.ctx, big.Zero(), big.Zero(), 200, 128, 32<<30)
require.NoError(t, err)

// Set max deals per publish deals message to maxDealsPerMsg
minerDef := []StorageMiner{{
Full: 0,
Opts: node.Options(
node.Override(
new(*storageadapter.DealPublisher),
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
Period: publishPeriod,
MaxDealsPerMsg: maxDealsPerMsg,
})),
node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) {
return func() (sealiface.Config, error) {
return sealiface.Config{
MaxWaitDealsSectors: 1,
MaxSealingSectors: 1,
MaxSealingSectorsForDeals: 2,
AlwaysKeepUnsealedCopy: true,
}, nil
}, nil
}),
),
Preseal: PresealGenesis,
}}
checkNoPadding := func() {
sl, err := sn[0].SectorsList(s.ctx)
require.NoError(t, err)

// Create a connect client and miner node
n, sn := b(t, OneFull, minerDef)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
s := connectAndStartMining(t, b, blocktime, client, miner)
defer s.blockMiner.Stop()
sort.Slice(sl, func(i, j int) bool {
return sl[i] < sl[j]
})

// Starts a deal and waits until it's published
runDealTillSeal := func(rseed int) {
res, _, err := CreateClientFile(s.ctx, s.client, rseed)
require.NoError(t, err)
for _, snum := range sl {
si, err := sn[0].SectorsStatus(s.ctx, snum, false)
require.NoError(t, err)

dc := startDeal(t, s.ctx, s.miner, s.client, res.Root, false, startEpoch)
waitDealSealed(t, s.ctx, s.miner, s.client, dc, false)
}
// fmt.Printf("S %d: %+v %s\n", snum, si.Deals, si.State)

// Run maxDealsPerMsg+1 deals in parallel
done := make(chan struct{}, maxDealsPerMsg+1)
for rseed := 1; rseed <= int(maxDealsPerMsg+1); rseed++ {
rseed := rseed
go func() {
runDealTillSeal(rseed)
done <- struct{}{}
}()
}
for _, deal := range si.Deals {
if deal == 0 {
fmt.Printf("sector %d had a padding piece!\n", snum)
}
}
}
}

// Wait for maxDealsPerMsg of the deals to be published
for i := 0; i < int(maxDealsPerMsg); i++ {
<-done
// Starts a deal and waits until it's published
runDealTillSeal := func(rseed int) {
res, _, err := CreateClientFile(s.ctx, s.client, rseed, piece)
require.NoError(t, err)

dc := startDeal(t, s.ctx, s.miner, s.client, res.Root, false, startEpoch)
waitDealSealed(t, s.ctx, s.miner, s.client, dc, false, true, checkNoPadding)
}

// Run maxDealsPerMsg deals in parallel
done := make(chan struct{}, maxDealsPerMsg)
for rseed := 0; rseed < int(maxDealsPerMsg); rseed++ {
rseed := rseed
go func() {
runDealTillSeal(rseed)
done <- struct{}{}
}()
}

// Wait for maxDealsPerMsg of the deals to be published
for i := 0; i < int(maxDealsPerMsg); i++ {
<-done
}

checkNoPadding()

sl, err := sn[0].SectorsList(s.ctx)
require.NoError(t, err)
require.Equal(t, len(sl), expectSectors)
}
}

sl, err := sn[0].SectorsList(s.ctx)
require.NoError(t, err)
require.GreaterOrEqual(t, len(sl), 4)
require.LessOrEqual(t, len(sl), 5)
t.Run("4-p1600B", run(1600, 4, 4))
t.Run("4-p513B", run(513, 4, 2))
if !testing.Short() {
t.Run("32-p257B", run(257, 32, 8))
t.Run("32-p10B", run(10, 32, 2))

// fixme: this appears to break data-transfer / markets in some really creative ways
//t.Run("128-p10B", run(10, 128, 8))
}
}

func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
@@ -303,12 +349,12 @@ func TestSecondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration

// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
waitDealSealed(t, s.ctx, s.miner, s.client, deal1, true)
waitDealSealed(t, s.ctx, s.miner, s.client, deal1, true, false, nil)

deal2 := startDeal(t, s.ctx, s.miner, s.client, fcid2, true, 0)

time.Sleep(time.Second)
waitDealSealed(t, s.ctx, s.miner, s.client, deal2, false)
waitDealSealed(t, s.ctx, s.miner, s.client, deal2, false, false, nil)

// Retrieval
info, err := s.client.ClientGetDealInfo(s.ctx, *deal2)
@@ -364,7 +410,7 @@ func startDeal(t *testing.T, ctx context.Context, miner TestStorageNode, client
return deal
}

func waitDealSealed(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, deal *cid.Cid, noseal bool) {
func waitDealSealed(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, deal *cid.Cid, noseal, noSealStart bool, cb func()) {
loop:
for {
di, err := client.ClientGetDealInfo(ctx, *deal)
@@ -376,7 +422,9 @@ loop:
if noseal {
return
}
startSealingWaiting(t, ctx, miner)
if !noSealStart {
startSealingWaiting(t, ctx, miner)
}
case storagemarket.StorageDealProposalRejected:
t.Fatal("deal rejected")
case storagemarket.StorageDealFailing:
@@ -387,8 +435,25 @@ loop:
fmt.Println("COMPLETE", di)
break loop
}
fmt.Println("Deal state: ", storagemarket.DealStates[di.State])

mds, err := miner.MarketListIncompleteDeals(ctx)
if err != nil {
t.Fatal(err)
}

var minerState storagemarket.StorageDealStatus
for _, md := range mds {
if md.DealID == di.DealID {
minerState = md.State
break
}
}

fmt.Printf("Deal %d state: client:%s provider:%s\n", di.DealID, storagemarket.DealStates[di.State], storagemarket.DealStates[minerState])
time.Sleep(time.Second / 2)
if cb != nil {
cb()
}
}
}

@@ -430,7 +495,7 @@ func startSealingWaiting(t *testing.T, ctx context.Context, miner TestStorageNod
si, err := miner.SectorsStatus(ctx, snum, false)
require.NoError(t, err)

t.Logf("Sector state: %s", si.State)
t.Logf("Sector %d state: %s", snum, si.State)
if si.State == api.SectorState(sealing.WaitDeals) {
require.NoError(t, miner.SectorStartSealing(ctx, snum))
}
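The reworked TestBatchDealInput runs each case as a subtest parameterized by piece size, number of deals, and expected sector count: it starts all deals concurrently, waits for them to seal, uses checkNoPadding to report any sector that was filled with a padding piece (deal ID 0), and finally asserts that the deals were packed into the expected number of sectors. Below is a minimal, self-contained sketch of the fan-out/fan-in pattern the test relies on; it is plain Go, not tied to the lotus test harness, and runDeal is a hypothetical stand-in for runDealTillSeal.

```go
package main

import (
	"fmt"
	"time"
)

// runDeal is a hypothetical stand-in for the test's runDealTillSeal helper.
func runDeal(rseed int) {
	time.Sleep(10 * time.Millisecond) // pretend to run the deal flow
	fmt.Printf("deal with seed %d sealed\n", rseed)
}

func main() {
	const deals = 4

	// Fan out: one goroutine per deal, completions reported on a buffered channel.
	done := make(chan struct{}, deals)
	for rseed := 0; rseed < deals; rseed++ {
		rseed := rseed // capture the loop variable for the goroutine
		go func() {
			runDeal(rseed)
			done <- struct{}{}
		}()
	}

	// Fan in: block until every deal has finished before inspecting sectors.
	for i := 0; i < deals; i++ {
		<-done
	}
	fmt.Println("all deals done")
}
```
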
2 changes: 1 addition & 1 deletion api/test/mining.go
@@ -194,7 +194,7 @@ func TestDealMining(t *testing.T, b APIBuilder, blocktime time.Duration, carExpo
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)

waitDealSealed(t, ctx, provider, client, deal, false)
waitDealSealed(t, ctx, provider, client, deal, false, false, nil)

<-minedTwo

12 changes: 7 additions & 5 deletions chain/events/events_called.go
@@ -144,8 +144,10 @@ func (e *hcEvents) processHeadChangeEvent(rev, app []*types.TipSet) error {

// Queue up calls until there have been enough blocks to reach
// confidence on the message calls
for tid, data := range newCalls {
e.queueForConfidence(tid, data, nil, ts)
for tid, calls := range newCalls {
for _, data := range calls {
e.queueForConfidence(tid, data, nil, ts)
}
}

for at := e.lastTs.Height(); at <= ts.Height(); at++ {
@@ -474,7 +476,7 @@ func newMessageEvents(ctx context.Context, hcAPI headChangeAPI, cs EventAPI) mes
}

// Check if there are any new actor calls
func (me *messageEvents) checkNewCalls(ts *types.TipSet) (map[triggerID]eventData, error) {
func (me *messageEvents) checkNewCalls(ts *types.TipSet) (map[triggerID][]eventData, error) {
pts, err := me.cs.ChainGetTipSet(me.ctx, ts.Parents()) // we actually care about messages in the parent tipset here
if err != nil {
log.Errorf("getting parent tipset in checkNewCalls: %s", err)
@@ -485,7 +487,7 @@ func (me *messageEvents) checkNewCalls(ts *types.TipSet) (map[triggerID]eventDat
defer me.lk.RUnlock()

// For each message in the tipset
res := make(map[triggerID]eventData)
res := make(map[triggerID][]eventData)
me.messagesForTs(pts, func(msg *types.Message) {
// TODO: provide receipts

@@ -500,7 +502,7 @@ func (me *messageEvents) checkNewCalls(ts *types.TipSet) (map[triggerID]eventDat
// If there was a match, include the message in the results for the
// trigger
if matched {
res[tid] = msg
res[tid] = append(res[tid], msg)
}
}
})
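The events change widens checkNewCalls from map[triggerID]eventData to map[triggerID][]eventData, so when several messages in the same tipset match one trigger, every match is queued for confidence instead of the last match silently overwriting the earlier ones; processHeadChangeEvent now iterates the slice for each trigger. A simplified, standalone illustration of the difference, using plain strings in place of eventData:

```go
package main

import "fmt"

func main() {
	// Two messages in the same tipset that match the same trigger.
	msgs := []string{"publish-A", "publish-B"}

	// Old shape: map[triggerID]eventData. A second match overwrites the first,
	// so only one call would be queued for confidence.
	single := map[int]string{}
	for _, m := range msgs {
		single[1] = m
	}
	fmt.Println(single[1]) // publish-B

	// New shape: map[triggerID][]eventData. Every match is kept, and the caller
	// queues each one for confidence.
	multi := map[int][]string{}
	for _, m := range msgs {
		multi[1] = append(multi[1], m)
	}
	fmt.Println(multi[1]) // [publish-A publish-B]
}
```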