From 6252ad7c41bd4ea6cb7a698cf3794109338a99c7 Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Mon, 9 Oct 2023 18:51:38 +0400 Subject: [PATCH 01/34] remove gfm, pull datatransfer --- .circleci/config.yml | 10 - api/api.go | 85 - api/api_full.go | 80 +- api/docgen/docgen.go | 21 +- api/proxy_gen.go | 304 -- api/types.go | 66 - cmd/boost/provider_cmd.go | 99 +- cmd/boostd/import_data.go | 14 +- cmd/boostd/legacy_data_transfers.go | 325 -- cmd/boostd/legacy_retrieval_deals.go | 277 -- cmd/boostd/recover.go | 2 +- cmd/booster-http/server.go | 4 - cmd/boostx/stats_cmd.go | 2 +- cmd/boostx/utils_cmd.go | 2 +- cmd/lib/common.go | 10 +- cmd/lib/stores/dagstore.go | 86 + cmd/lib/stores/error.go | 9 + cmd/lib/stores/filestore.go | 164 + cmd/lib/stores/kvcarbs.go | 1677 +++++++++ cmd/lib/stores/ro_bstores.go | 60 + cmd/lib/stores/rw_bstores.go | 63 + cmd/migrate-lid/migrate_lid.go | 6 +- datatransfer/channelmonitor/channelmonitor.go | 509 +++ datatransfer/channels/block_index_cache.go | 63 + datatransfer/channels/channel_state.go | 251 ++ datatransfer/channels/channels.go | 414 +++ datatransfer/channels/channels_fsm.go | 298 ++ .../channels/internal/internalchannel.go | 90 + .../internal/internalchannel_cbor_gen.go | 1043 ++++++ .../internal/migrations/migrations.go | 13 + datatransfer/encoding/encoding.go | 171 + datatransfer/errors.go | 32 + datatransfer/events.go | 160 + datatransfer/impl/environment.go | 27 + datatransfer/impl/events.go | 590 ++++ datatransfer/impl/impl.go | 561 +++ datatransfer/impl/receiver.go | 192 + datatransfer/impl/restart.go | 198 ++ datatransfer/impl/timecounter.go | 21 + datatransfer/impl/utils.go | 140 + datatransfer/manager.go | 260 ++ datatransfer/message.go | 56 + datatransfer/message/message.go | 19 + datatransfer/message/message1_1/message.go | 195 + .../message/message1_1/transfer_message.go | 58 + .../message1_1/transfer_message_cbor_gen.go | 187 + .../message/message1_1/transfer_request.go | 165 + .../message1_1/transfer_request_cbor_gen.go | 405 +++ .../message/message1_1/transfer_response.go | 126 + .../message1_1/transfer_response_cbor_gen.go | 265 ++ .../message/message1_1prime/message.go | 206 ++ .../message/message1_1prime/schema.go | 29 + .../message/message1_1prime/schema.ipldsch | 37 + .../message1_1prime/transfer_message.go | 43 + .../message1_1prime/transfer_request.go | 146 + .../message1_1prime/transfer_response.go | 115 + datatransfer/message/types/message_types.go | 16 + datatransfer/network/interface.go | 57 + datatransfer/network/libp2p_impl.go | 355 ++ datatransfer/registry/registry.go | 81 + datatransfer/testutil/testutils.go | 24 + datatransfer/tracing/tracing.go | 64 + .../graphsync/extension/gsextension.go | 83 + datatransfer/transport/graphsync/graphsync.go | 1312 +++++++ datatransfer/types.go | 428 +++ datatransfer/types_cbor_gen.go | 447 +++ db/migrations/20231005140947_create_ask.sql | 18 + db/storageask.go | 84 + fundmanager/fundmanager.go | 10 +- go.mod | 27 +- go.sum | 2 - gql/resolver.go | 16 +- gql/resolver_ask.go | 16 +- gql/resolver_legacy.go | 30 +- gql/resolver_rtvllog.go | 1 + indexprovider/wrapper.go | 110 +- indexprovider/wrapper_test.go | 24 +- itests/framework/framework.go | 2 +- itests/markets_v1_deal_test.go | 55 - itests/markets_v1_offline_deal_test.go | 89 - lib/legacy/dealmanager.go | 116 +- lib/legacy/mocks/legacy_manager_mock.go | 154 + markets/journal.go | 76 - markets/loggers/loggers.go | 37 +- markets/piecestore/impl/piecestore.go | 214 ++ markets/piecestore/migrations/migrations.go | 90 + 
.../migrations/migrations_cbor_gen.go | 507 +++ markets/piecestore/types.go | 70 + markets/piecestore/types_cbor_gen.go | 737 ++++ markets/pricing/cli.go | 50 - markets/retrievaladapter/client.go | 127 - markets/retrievaladapter/client_blockstore.go | 83 - markets/retrievaladapter/provider.go | 108 - markets/retrievaladapter/provider_test.go | 206 -- markets/sectoraccessor/sectoraccessor.go | 2 +- markets/shared/ready.go | 104 + markets/shared/retrystream.go | 103 + markets/shared/shared.go | 7 + markets/shared/timecounter.go | 21 + markets/storageadapter/client.go | 446 --- markets/storageadapter/client_blockstore.go | 102 - markets/storageadapter/dealpublisher.go | 3 + markets/storageadapter/dealstatematcher.go | 85 - .../storageadapter/dealstatematcher_test.go | 155 - .../storageadapter/ondealsectorcommitted.go | 399 --- .../ondealsectorcommitted_test.go | 581 --- markets/storageadapter/provider.go | 438 --- markets/utils/converters.go | 12 +- node/builder.go | 80 +- node/impl/boost.go | 395 +-- node/impl/boost_legacy.go | 239 -- node/modules/client.go | 176 - node/modules/dealfilter.go | 4 +- node/modules/dtypes/miner.go | 7 +- node/modules/dtypes/storage.go | 15 +- node/modules/graphsync.go | 49 +- node/modules/legacy_markets.go | 127 - node/modules/piecedirectory.go | 152 +- node/modules/provider_data_transfer.go | 49 - node/modules/provider_piece_store.go | 25 - node/modules/retrieval.go | 12 +- node/modules/storageminer.go | 386 +- node/modules/storageminer_dagstore.go | 138 - node/modules/storageminer_idxprov.go | 184 +- retrievalmarket/client/client.go | 133 +- retrievalmarket/lib/idxciddagstore.go | 72 - retrievalmarket/lib/shardselector.go | 181 - retrievalmarket/lib/shardselector_test.go | 129 - retrievalmarket/lp2pimpl/transports.go | 2 +- retrievalmarket/mock/gen.go | 4 +- retrievalmarket/mock/piecestore.go | 14 +- retrievalmarket/mock/retrievalmarket.go | 4 +- retrievalmarket/rtvllog/db.go | 28 +- retrievalmarket/rtvllog/retrieval_log.go | 99 +- retrievalmarket/server/channelstate.go | 30 +- retrievalmarket/server/events.go | 42 +- retrievalmarket/server/gsunpaidretrieval.go | 196 +- .../server/gsunpaidretrieval_test.go | 642 ++-- retrievalmarket/server/provider_pieces.go | 6 +- retrievalmarket/server/queryask.go | 38 +- retrievalmarket/server/types.go | 10 +- retrievalmarket/server/validation.go | 33 +- .../types/legacyretrievaltypes/dealstatus.go | 186 + .../types/legacyretrievaltypes/events.go | 281 ++ .../migrations/maptypes/maptypes.go | 55 + .../migrations/maptypes/maptypes_cbor_gen.go | 1142 ++++++ .../migrations/migrations.go | 386 ++ .../migrations/migrations_cbor_gen.go | 1815 ++++++++++ .../types/legacyretrievaltypes/types.go | 509 +++ .../legacyretrievaltypes/types_cbor_gen.go | 2909 +++++++++++++++ storagemarket/dealfilter/cli.go | 8 +- storagemarket/helper.go | 6 +- storagemarket/lp2pimpl/net.go | 32 +- storagemarket/provider.go | 18 +- storagemarket/provider_test.go | 59 +- storagemarket/smtestutil/mocks.go | 14 +- storagemarket/storedask/storedask.go | 215 ++ storagemarket/types/legacytypes/dealstatus.go | 235 ++ .../types/legacytypes/filestore/file.go | 39 + .../types/legacytypes/filestore/filestore.go | 83 + .../types/legacytypes/filestore/types.go | 38 + .../legacytypes/migrations/migrations.go | 325 ++ .../migrations/migrations_cbor_gen.go | 2271 ++++++++++++ .../migrations/migrations_mapenc_types.go | 55 + .../migrations_mapenc_types_cbor_gen.go | 936 +++++ .../types/legacytypes/network/types.go | 79 + .../legacytypes/network/types_cbor_gen.go | 927 
+++++ storagemarket/types/legacytypes/types.go | 319 ++ .../types/legacytypes/types_cbor_gen.go | 3139 +++++++++++++++++ storagemarket/types/mock_types/mocks.go | 5 +- storagemarket/types/types.go | 16 +- 171 files changed, 31072 insertions(+), 7566 deletions(-) delete mode 100644 cmd/boostd/legacy_data_transfers.go delete mode 100644 cmd/boostd/legacy_retrieval_deals.go create mode 100644 cmd/lib/stores/dagstore.go create mode 100644 cmd/lib/stores/error.go create mode 100644 cmd/lib/stores/filestore.go create mode 100644 cmd/lib/stores/kvcarbs.go create mode 100644 cmd/lib/stores/ro_bstores.go create mode 100644 cmd/lib/stores/rw_bstores.go create mode 100644 datatransfer/channelmonitor/channelmonitor.go create mode 100644 datatransfer/channels/block_index_cache.go create mode 100644 datatransfer/channels/channel_state.go create mode 100644 datatransfer/channels/channels.go create mode 100644 datatransfer/channels/channels_fsm.go create mode 100644 datatransfer/channels/internal/internalchannel.go create mode 100644 datatransfer/channels/internal/internalchannel_cbor_gen.go create mode 100644 datatransfer/channels/internal/migrations/migrations.go create mode 100644 datatransfer/encoding/encoding.go create mode 100644 datatransfer/errors.go create mode 100644 datatransfer/events.go create mode 100644 datatransfer/impl/environment.go create mode 100644 datatransfer/impl/events.go create mode 100644 datatransfer/impl/impl.go create mode 100644 datatransfer/impl/receiver.go create mode 100644 datatransfer/impl/restart.go create mode 100644 datatransfer/impl/timecounter.go create mode 100644 datatransfer/impl/utils.go create mode 100644 datatransfer/manager.go create mode 100644 datatransfer/message.go create mode 100644 datatransfer/message/message.go create mode 100644 datatransfer/message/message1_1/message.go create mode 100644 datatransfer/message/message1_1/transfer_message.go create mode 100644 datatransfer/message/message1_1/transfer_message_cbor_gen.go create mode 100644 datatransfer/message/message1_1/transfer_request.go create mode 100644 datatransfer/message/message1_1/transfer_request_cbor_gen.go create mode 100644 datatransfer/message/message1_1/transfer_response.go create mode 100644 datatransfer/message/message1_1/transfer_response_cbor_gen.go create mode 100644 datatransfer/message/message1_1prime/message.go create mode 100644 datatransfer/message/message1_1prime/schema.go create mode 100644 datatransfer/message/message1_1prime/schema.ipldsch create mode 100644 datatransfer/message/message1_1prime/transfer_message.go create mode 100644 datatransfer/message/message1_1prime/transfer_request.go create mode 100644 datatransfer/message/message1_1prime/transfer_response.go create mode 100644 datatransfer/message/types/message_types.go create mode 100644 datatransfer/network/interface.go create mode 100644 datatransfer/network/libp2p_impl.go create mode 100644 datatransfer/registry/registry.go create mode 100644 datatransfer/testutil/testutils.go create mode 100644 datatransfer/tracing/tracing.go create mode 100644 datatransfer/transport/graphsync/extension/gsextension.go create mode 100644 datatransfer/transport/graphsync/graphsync.go create mode 100644 datatransfer/types.go create mode 100644 datatransfer/types_cbor_gen.go create mode 100644 db/migrations/20231005140947_create_ask.sql create mode 100644 db/storageask.go delete mode 100644 itests/markets_v1_deal_test.go delete mode 100644 itests/markets_v1_offline_deal_test.go create mode 100644 
lib/legacy/mocks/legacy_manager_mock.go delete mode 100644 markets/journal.go create mode 100644 markets/piecestore/impl/piecestore.go create mode 100644 markets/piecestore/migrations/migrations.go create mode 100644 markets/piecestore/migrations/migrations_cbor_gen.go create mode 100644 markets/piecestore/types.go create mode 100644 markets/piecestore/types_cbor_gen.go delete mode 100644 markets/pricing/cli.go delete mode 100644 markets/retrievaladapter/client.go delete mode 100644 markets/retrievaladapter/client_blockstore.go delete mode 100644 markets/retrievaladapter/provider.go delete mode 100644 markets/retrievaladapter/provider_test.go create mode 100644 markets/shared/ready.go create mode 100644 markets/shared/retrystream.go create mode 100644 markets/shared/shared.go create mode 100644 markets/shared/timecounter.go delete mode 100644 markets/storageadapter/client.go delete mode 100644 markets/storageadapter/client_blockstore.go delete mode 100644 markets/storageadapter/dealstatematcher.go delete mode 100644 markets/storageadapter/dealstatematcher_test.go delete mode 100644 markets/storageadapter/ondealsectorcommitted.go delete mode 100644 markets/storageadapter/ondealsectorcommitted_test.go delete mode 100644 markets/storageadapter/provider.go delete mode 100644 node/impl/boost_legacy.go delete mode 100644 node/modules/client.go delete mode 100644 node/modules/legacy_markets.go delete mode 100644 node/modules/provider_data_transfer.go delete mode 100644 node/modules/provider_piece_store.go delete mode 100644 node/modules/storageminer_dagstore.go delete mode 100644 retrievalmarket/lib/idxciddagstore.go delete mode 100644 retrievalmarket/lib/shardselector.go delete mode 100644 retrievalmarket/lib/shardselector_test.go create mode 100644 retrievalmarket/types/legacyretrievaltypes/dealstatus.go create mode 100644 retrievalmarket/types/legacyretrievaltypes/events.go create mode 100644 retrievalmarket/types/legacyretrievaltypes/migrations/maptypes/maptypes.go create mode 100644 retrievalmarket/types/legacyretrievaltypes/migrations/maptypes/maptypes_cbor_gen.go create mode 100644 retrievalmarket/types/legacyretrievaltypes/migrations/migrations.go create mode 100644 retrievalmarket/types/legacyretrievaltypes/migrations/migrations_cbor_gen.go create mode 100644 retrievalmarket/types/legacyretrievaltypes/types.go create mode 100644 retrievalmarket/types/legacyretrievaltypes/types_cbor_gen.go create mode 100644 storagemarket/storedask/storedask.go create mode 100644 storagemarket/types/legacytypes/dealstatus.go create mode 100644 storagemarket/types/legacytypes/filestore/file.go create mode 100644 storagemarket/types/legacytypes/filestore/filestore.go create mode 100644 storagemarket/types/legacytypes/filestore/types.go create mode 100644 storagemarket/types/legacytypes/migrations/migrations.go create mode 100644 storagemarket/types/legacytypes/migrations/migrations_cbor_gen.go create mode 100644 storagemarket/types/legacytypes/migrations/migrations_mapenc_types.go create mode 100644 storagemarket/types/legacytypes/migrations/migrations_mapenc_types_cbor_gen.go create mode 100644 storagemarket/types/legacytypes/network/types.go create mode 100644 storagemarket/types/legacytypes/network/types_cbor_gen.go create mode 100644 storagemarket/types/legacytypes/types.go create mode 100644 storagemarket/types/legacytypes/types_cbor_gen.go diff --git a/.circleci/config.yml b/.circleci/config.yml index fbac63ddd..11b5d4880 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -318,21 +318,11 
@@ workflows: suite: itest-dummydeal target: "./itests/dummydeal_test.go" - - test: - name: test-itest-markets_v1_deal - suite: itest-markets_v1_deal - target: "./itests/markets_v1_deal_test.go" - - test: name: test-itest-markets_v1_identity_cid suite: itest-markets_v1_identity_cid target: "./itests/markets_v1_identity_cid_test.go" - - test: - name: test-itest-markets_v1_offline_deal - suite: itest-markets_v1_offline_deal - target: "./itests/markets_v1_offline_deal_test.go" - - test: name: test-itest-markets_v1_retrieval suite: itest-markets_v1_retrieval diff --git a/api/api.go b/api/api.go index e75054a08..4b8f14edc 100644 --- a/api/api.go +++ b/api/api.go @@ -3,17 +3,9 @@ package api import ( "context" - "github.com/filecoin-project/boost-gfm/retrievalmarket" - "github.com/filecoin-project/boost-gfm/storagemarket" smtypes "github.com/filecoin-project/boost/storagemarket/types" - "github.com/filecoin-project/go-address" - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-state-types/abi" - lapi "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/types" "github.com/google/uuid" "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p/core/peer" "github.com/multiformats/go-multihash" ) @@ -41,14 +33,6 @@ type Boost interface { BoostDeal(ctx context.Context, dealUuid uuid.UUID) (*smtypes.ProviderDealState, error) //perm:admin BoostDealBySignedProposalCid(ctx context.Context, proposalCid cid.Cid) (*smtypes.ProviderDealState, error) //perm:admin BoostDummyDeal(context.Context, smtypes.DealParams) (*ProviderDealRejectionInfo, error) //perm:admin - BoostDagstoreRegisterShard(ctx context.Context, key string) error //perm:admin - BoostDagstoreDestroyShard(ctx context.Context, key string) error //perm:admin - BoostDagstoreInitializeShard(ctx context.Context, key string) error //perm:admin - BoostDagstoreInitializeAll(ctx context.Context, params DagstoreInitializeAllParams) (<-chan DagstoreInitializeAllEvent, error) //perm:admin - BoostDagstoreRecoverShard(ctx context.Context, key string) error //perm:admin - BoostDagstoreGC(ctx context.Context) ([]DagstoreShardResult, error) //perm:admin - BoostDagstorePiecesContainingMultihash(ctx context.Context, mh multihash.Multihash) ([]cid.Cid, error) //perm:read - BoostDagstoreListShards(ctx context.Context) ([]DagstoreShardInfo, error) //perm:admin BoostMakeDeal(context.Context, smtypes.DealParams) (*ProviderDealRejectionInfo, error) //perm:write // MethodGroup: Blockstore @@ -59,75 +43,6 @@ type Boost interface { // MethodGroup: PieceDirectory PdBuildIndexForPieceCid(ctx context.Context, piececid cid.Cid) error //perm:admin - // RuntimeSubsystems returns the subsystems that are enabled - // in this instance. 
- RuntimeSubsystems(ctx context.Context) (lapi.MinerSubsystems, error) //perm:read - - // MethodGroup: LegacyMarket - MarketListRetrievalDeals(ctx context.Context) ([]retrievalmarket.ProviderDealState, error) //perm:read - MarketSetRetrievalAsk(ctx context.Context, rask *retrievalmarket.Ask) error //perm:admin - MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error) //perm:read - MarketSetAsk(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error //perm:admin - MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error) //perm:read - MarketListDataTransfers(ctx context.Context) ([]DataTransferChannel, error) //perm:write - MarketDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error) //perm:write - MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write - MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write - MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error //perm:write - MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) //perm:read - MarketPendingDeals(ctx context.Context) (lapi.PendingDealInfo, error) //perm:write - SectorsRefs(context.Context) (map[string][]lapi.SealedRef, error) //perm:read - - // MethodGroup: Actor - ActorSectorSize(context.Context, address.Address) (abi.SectorSize, error) //perm:read - - // MethodGroup: Deals - DealsConsiderOnlineStorageDeals(context.Context) (bool, error) //perm:admin - DealsSetConsiderOnlineStorageDeals(context.Context, bool) error //perm:admin - DealsConsiderOnlineRetrievalDeals(context.Context) (bool, error) //perm:admin - DealsSetConsiderOnlineRetrievalDeals(context.Context, bool) error //perm:admin - DealsPieceCidBlocklist(context.Context) ([]cid.Cid, error) //perm:admin - DealsSetPieceCidBlocklist(context.Context, []cid.Cid) error //perm:admin - DealsConsiderOfflineStorageDeals(context.Context) (bool, error) //perm:admin - DealsSetConsiderOfflineStorageDeals(context.Context, bool) error //perm:admin - DealsConsiderOfflineRetrievalDeals(context.Context) (bool, error) //perm:admin - DealsSetConsiderOfflineRetrievalDeals(context.Context, bool) error //perm:admin - DealsConsiderVerifiedStorageDeals(context.Context) (bool, error) //perm:admin - DealsSetConsiderVerifiedStorageDeals(context.Context, bool) error //perm:admin - DealsConsiderUnverifiedStorageDeals(context.Context) (bool, error) //perm:admin - DealsSetConsiderUnverifiedStorageDeals(context.Context, bool) error //perm:admin - // MethodGroup: Misc OnlineBackup(context.Context, string) error //perm:admin } - -// DagstoreShardInfo is the serialized form of dagstore.DagstoreShardInfo that -// we expose through JSON-RPC to avoid clients having to depend on the -// dagstore lib. -type DagstoreShardInfo struct { - Key string - State string - Error string -} - -// DagstoreShardResult enumerates results per shard. -type DagstoreShardResult struct { - Key string - Success bool - Error string -} - -type DagstoreInitializeAllParams struct { - MaxConcurrency int - IncludeSealed bool -} - -// DagstoreInitializeAllEvent represents an initialization event. 
-type DagstoreInitializeAllEvent struct { - Key string - Event string // "start", "end" - Success bool - Error string - Total int - Current int -} diff --git a/api/api_full.go b/api/api_full.go index e306ddd1e..bd51d90a4 100644 --- a/api/api_full.go +++ b/api/api_full.go @@ -5,17 +5,11 @@ import ( "fmt" "time" - "github.com/ipfs/go-cid" - textselector "github.com/ipld/go-ipld-selector-text-lite" - "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" - datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" - - "github.com/filecoin-project/boost-gfm/retrievalmarket" - "github.com/filecoin-project/boost-gfm/storagemarket" + "github.com/ipfs/go-cid" "github.com/filecoin-project/go-state-types/builtin/v8/paych" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -89,29 +83,6 @@ type Import struct { CARPath string } -type DealInfo struct { - ProposalCid cid.Cid - State storagemarket.StorageDealStatus - Message string // more information about deal state, particularly errors - DealStages *storagemarket.DealStages - Provider address.Address - - DataRef *storagemarket.DataRef - PieceCID cid.Cid - Size uint64 - - PricePerEpoch types.BigInt - Duration uint64 - - DealID abi.DealID - - CreationTime time.Time - Verified bool - - TransferChannelID *datatransfer.ChannelID - DataTransfer *DataTransferChannel -} - type MsgLookup struct { Message cid.Cid // Can be different than requested, in case it was replaced, but only gas values changed Receipt types.MessageReceipt @@ -222,37 +193,6 @@ type MinerPower struct { HasMinPower bool } -type QueryOffer struct { - Err string - - Root cid.Cid - Piece *cid.Cid - - Size uint64 - MinPrice types.BigInt - UnsealPrice types.BigInt - PaymentInterval uint64 - PaymentIntervalIncrease uint64 - Miner address.Address - MinerPeer retrievalmarket.RetrievalPeer -} - -func (o *QueryOffer) Order(client address.Address) RetrievalOrder { - return RetrievalOrder{ - Root: o.Root, - Piece: o.Piece, - Size: o.Size, - Total: o.MinPrice, - UnsealPrice: o.UnsealPrice, - PaymentInterval: o.PaymentInterval, - PaymentIntervalIncrease: o.PaymentIntervalIncrease, - Client: client, - - Miner: o.Miner, - MinerPeer: &o.MinerPeer, - } -} - type MarketBalance struct { Escrow big.Int Locked big.Int @@ -263,24 +203,6 @@ type MarketDeal struct { State market.DealState } -type RetrievalOrder struct { - // TODO: make this less unixfs specific - Root cid.Cid - Piece *cid.Cid - DatamodelPathSelector *textselector.Expression - Size uint64 - - FromLocalCAR string // if specified, get data from a local CARv2 file. 
- // TODO: support offset - Total types.BigInt - UnsealPrice types.BigInt - PaymentInterval uint64 - PaymentIntervalIncrease uint64 - Client address.Address - Miner address.Address - MinerPeer *retrievalmarket.RetrievalPeer -} - type InvocResult struct { MsgCid cid.Cid Msg *types.Message diff --git a/api/docgen/docgen.go b/api/docgen/docgen.go index ab89bf121..5399ad37c 100644 --- a/api/docgen/docgen.go +++ b/api/docgen/docgen.go @@ -12,8 +12,7 @@ import ( "time" "unicode" - "github.com/filecoin-project/boost-gfm/filestore" - "github.com/filecoin-project/boost-gfm/retrievalmarket" + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" datatransfer "github.com/filecoin-project/go-data-transfer" @@ -94,7 +93,7 @@ func init() { storeIDExample := imports.ID(50) textSelExample := textselector.Expression("Links/21/Hash/Links/42/Hash") - clientEvent := retrievalmarket.ClientEventDealAccepted + clientEvent := legacyretrievaltypes.ClientEventDealAccepted addExample(bitfield.NewFromSet([]uint64{5})) addExample(abi.RegisteredSealProof_StackedDrg32GiBV1_1) @@ -129,8 +128,8 @@ func init() { addExample(&storeIDExample) addExample(clientEvent) addExample(&clientEvent) - addExample(retrievalmarket.ClientEventDealAccepted) - addExample(retrievalmarket.DealStatusNew) + addExample(legacyretrievaltypes.ClientEventDealAccepted) + addExample(legacyretrievaltypes.DealStatusNew) addExample(&textSelExample) addExample(network.ReachabilityPublic) addExample(build.TestNetworkVersion) @@ -186,10 +185,9 @@ func init() { ExampleValues[reflect.TypeOf(struct{ A multiaddr.Multiaddr }{}).Field(0).Type] = maddr // miner specific - addExample(filestore.Path(".lotusminer/fstmp123")) si := uint64(12) addExample(&si) - addExample(retrievalmarket.DealID(5)) + addExample(legacyretrievaltypes.DealID(5)) addExample(abi.ActorID(1000)) addExample(storiface.ID("76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8")) addExample(storiface.FTUnsealed) @@ -273,15 +271,6 @@ func init() { addExample(api.CheckStatusCode(0)) addExample(map[string]interface{}{"abc": 123}) - addExample(api.DagstoreShardResult{ - Key: "baga6ea4seaqecmtz7iak33dsfshi627abz4i4665dfuzr3qfs4bmad6dx3iigdq", - Error: "", - }) - addExample(api.DagstoreShardInfo{ - Key: "baga6ea4seaqecmtz7iak33dsfshi627abz4i4665dfuzr3qfs4bmad6dx3iigdq", - State: "ShardStateAvailable", - Error: "", - }) addExample(storiface.ResourceTable) addExample(network.ScopeStat{ Memory: 123, diff --git a/api/proxy_gen.go b/api/proxy_gen.go index bbdefb2ae..b63af95c9 100644 --- a/api/proxy_gen.go +++ b/api/proxy_gen.go @@ -7,15 +7,10 @@ import ( "errors" "time" - "github.com/filecoin-project/boost-gfm/retrievalmarket" - "github.com/filecoin-project/boost-gfm/storagemarket" smtypes "github.com/filecoin-project/boost/storagemarket/types" "github.com/filecoin-project/go-address" - datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/filecoin-project/go-jsonrpc/auth" - "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" - lapi "github.com/filecoin-project/lotus/api" lotus_api "github.com/filecoin-project/lotus/api" apitypes "github.com/filecoin-project/lotus/api/types" "github.com/filecoin-project/lotus/chain/types" @@ -36,30 +31,12 @@ type BoostStruct struct { NetStruct Internal struct { - ActorSectorSize func(p0 context.Context, p1 address.Address) (abi.SectorSize, error) `perm:"read"` - BlockstoreGet func(p0 context.Context, p1 cid.Cid) 
([]byte, error) `perm:"read"` BlockstoreGetSize func(p0 context.Context, p1 cid.Cid) (int, error) `perm:"read"` BlockstoreHas func(p0 context.Context, p1 cid.Cid) (bool, error) `perm:"read"` - BoostDagstoreDestroyShard func(p0 context.Context, p1 string) error `perm:"admin"` - - BoostDagstoreGC func(p0 context.Context) ([]DagstoreShardResult, error) `perm:"admin"` - - BoostDagstoreInitializeAll func(p0 context.Context, p1 DagstoreInitializeAllParams) (<-chan DagstoreInitializeAllEvent, error) `perm:"admin"` - - BoostDagstoreInitializeShard func(p0 context.Context, p1 string) error `perm:"admin"` - - BoostDagstoreListShards func(p0 context.Context) ([]DagstoreShardInfo, error) `perm:"admin"` - - BoostDagstorePiecesContainingMultihash func(p0 context.Context, p1 multihash.Multihash) ([]cid.Cid, error) `perm:"read"` - - BoostDagstoreRecoverShard func(p0 context.Context, p1 string) error `perm:"admin"` - - BoostDagstoreRegisterShard func(p0 context.Context, p1 string) error `perm:"admin"` - BoostDeal func(p0 context.Context, p1 uuid.UUID) (*smtypes.ProviderDealState, error) `perm:"admin"` BoostDealBySignedProposalCid func(p0 context.Context, p1 cid.Cid) (*smtypes.ProviderDealState, error) `perm:"admin"` @@ -106,37 +83,9 @@ type BoostStruct struct { DealsSetPieceCidBlocklist func(p0 context.Context, p1 []cid.Cid) error `perm:"admin"` - MarketCancelDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"` - - MarketDataTransferUpdates func(p0 context.Context) (<-chan DataTransferChannel, error) `perm:"write"` - - MarketGetAsk func(p0 context.Context) (*storagemarket.SignedStorageAsk, error) `perm:"read"` - - MarketGetRetrievalAsk func(p0 context.Context) (*retrievalmarket.Ask, error) `perm:"read"` - - MarketImportDealData func(p0 context.Context, p1 cid.Cid, p2 string) error `perm:"write"` - - MarketListDataTransfers func(p0 context.Context) ([]DataTransferChannel, error) `perm:"write"` - - MarketListIncompleteDeals func(p0 context.Context) ([]storagemarket.MinerDeal, error) `perm:"read"` - - MarketListRetrievalDeals func(p0 context.Context) ([]retrievalmarket.ProviderDealState, error) `perm:"read"` - - MarketPendingDeals func(p0 context.Context) (lapi.PendingDealInfo, error) `perm:"write"` - - MarketRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"` - - MarketSetAsk func(p0 context.Context, p1 types.BigInt, p2 types.BigInt, p3 abi.ChainEpoch, p4 abi.PaddedPieceSize, p5 abi.PaddedPieceSize) error `perm:"admin"` - - MarketSetRetrievalAsk func(p0 context.Context, p1 *retrievalmarket.Ask) error `perm:"admin"` - OnlineBackup func(p0 context.Context, p1 string) error `perm:"admin"` PdBuildIndexForPieceCid func(p0 context.Context, p1 cid.Cid) error `perm:"admin"` - - RuntimeSubsystems func(p0 context.Context) (lapi.MinerSubsystems, error) `perm:"read"` - - SectorsRefs func(p0 context.Context) (map[string][]lapi.SealedRef, error) `perm:"read"` } } @@ -265,17 +214,6 @@ type WalletStruct struct { type WalletStub struct { } -func (s *BoostStruct) ActorSectorSize(p0 context.Context, p1 address.Address) (abi.SectorSize, error) { - if s.Internal.ActorSectorSize == nil { - return *new(abi.SectorSize), ErrNotSupported - } - return s.Internal.ActorSectorSize(p0, p1) -} - -func (s *BoostStub) ActorSectorSize(p0 context.Context, p1 address.Address) (abi.SectorSize, error) { - return *new(abi.SectorSize), ErrNotSupported -} - func (s *BoostStruct) BlockstoreGet(p0 context.Context, p1 cid.Cid) ([]byte, 
error) { if s.Internal.BlockstoreGet == nil { return *new([]byte), ErrNotSupported @@ -309,94 +247,6 @@ func (s *BoostStub) BlockstoreHas(p0 context.Context, p1 cid.Cid) (bool, error) return false, ErrNotSupported } -func (s *BoostStruct) BoostDagstoreDestroyShard(p0 context.Context, p1 string) error { - if s.Internal.BoostDagstoreDestroyShard == nil { - return ErrNotSupported - } - return s.Internal.BoostDagstoreDestroyShard(p0, p1) -} - -func (s *BoostStub) BoostDagstoreDestroyShard(p0 context.Context, p1 string) error { - return ErrNotSupported -} - -func (s *BoostStruct) BoostDagstoreGC(p0 context.Context) ([]DagstoreShardResult, error) { - if s.Internal.BoostDagstoreGC == nil { - return *new([]DagstoreShardResult), ErrNotSupported - } - return s.Internal.BoostDagstoreGC(p0) -} - -func (s *BoostStub) BoostDagstoreGC(p0 context.Context) ([]DagstoreShardResult, error) { - return *new([]DagstoreShardResult), ErrNotSupported -} - -func (s *BoostStruct) BoostDagstoreInitializeAll(p0 context.Context, p1 DagstoreInitializeAllParams) (<-chan DagstoreInitializeAllEvent, error) { - if s.Internal.BoostDagstoreInitializeAll == nil { - return nil, ErrNotSupported - } - return s.Internal.BoostDagstoreInitializeAll(p0, p1) -} - -func (s *BoostStub) BoostDagstoreInitializeAll(p0 context.Context, p1 DagstoreInitializeAllParams) (<-chan DagstoreInitializeAllEvent, error) { - return nil, ErrNotSupported -} - -func (s *BoostStruct) BoostDagstoreInitializeShard(p0 context.Context, p1 string) error { - if s.Internal.BoostDagstoreInitializeShard == nil { - return ErrNotSupported - } - return s.Internal.BoostDagstoreInitializeShard(p0, p1) -} - -func (s *BoostStub) BoostDagstoreInitializeShard(p0 context.Context, p1 string) error { - return ErrNotSupported -} - -func (s *BoostStruct) BoostDagstoreListShards(p0 context.Context) ([]DagstoreShardInfo, error) { - if s.Internal.BoostDagstoreListShards == nil { - return *new([]DagstoreShardInfo), ErrNotSupported - } - return s.Internal.BoostDagstoreListShards(p0) -} - -func (s *BoostStub) BoostDagstoreListShards(p0 context.Context) ([]DagstoreShardInfo, error) { - return *new([]DagstoreShardInfo), ErrNotSupported -} - -func (s *BoostStruct) BoostDagstorePiecesContainingMultihash(p0 context.Context, p1 multihash.Multihash) ([]cid.Cid, error) { - if s.Internal.BoostDagstorePiecesContainingMultihash == nil { - return *new([]cid.Cid), ErrNotSupported - } - return s.Internal.BoostDagstorePiecesContainingMultihash(p0, p1) -} - -func (s *BoostStub) BoostDagstorePiecesContainingMultihash(p0 context.Context, p1 multihash.Multihash) ([]cid.Cid, error) { - return *new([]cid.Cid), ErrNotSupported -} - -func (s *BoostStruct) BoostDagstoreRecoverShard(p0 context.Context, p1 string) error { - if s.Internal.BoostDagstoreRecoverShard == nil { - return ErrNotSupported - } - return s.Internal.BoostDagstoreRecoverShard(p0, p1) -} - -func (s *BoostStub) BoostDagstoreRecoverShard(p0 context.Context, p1 string) error { - return ErrNotSupported -} - -func (s *BoostStruct) BoostDagstoreRegisterShard(p0 context.Context, p1 string) error { - if s.Internal.BoostDagstoreRegisterShard == nil { - return ErrNotSupported - } - return s.Internal.BoostDagstoreRegisterShard(p0, p1) -} - -func (s *BoostStub) BoostDagstoreRegisterShard(p0 context.Context, p1 string) error { - return ErrNotSupported -} - func (s *BoostStruct) BoostDeal(p0 context.Context, p1 uuid.UUID) (*smtypes.ProviderDealState, error) { if s.Internal.BoostDeal == nil { return nil, ErrNotSupported @@ -650,138 +500,6 @@ func (s 
*BoostStub) DealsSetPieceCidBlocklist(p0 context.Context, p1 []cid.Cid) return ErrNotSupported } -func (s *BoostStruct) MarketCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { - if s.Internal.MarketCancelDataTransfer == nil { - return ErrNotSupported - } - return s.Internal.MarketCancelDataTransfer(p0, p1, p2, p3) -} - -func (s *BoostStub) MarketCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { - return ErrNotSupported -} - -func (s *BoostStruct) MarketDataTransferUpdates(p0 context.Context) (<-chan DataTransferChannel, error) { - if s.Internal.MarketDataTransferUpdates == nil { - return nil, ErrNotSupported - } - return s.Internal.MarketDataTransferUpdates(p0) -} - -func (s *BoostStub) MarketDataTransferUpdates(p0 context.Context) (<-chan DataTransferChannel, error) { - return nil, ErrNotSupported -} - -func (s *BoostStruct) MarketGetAsk(p0 context.Context) (*storagemarket.SignedStorageAsk, error) { - if s.Internal.MarketGetAsk == nil { - return nil, ErrNotSupported - } - return s.Internal.MarketGetAsk(p0) -} - -func (s *BoostStub) MarketGetAsk(p0 context.Context) (*storagemarket.SignedStorageAsk, error) { - return nil, ErrNotSupported -} - -func (s *BoostStruct) MarketGetRetrievalAsk(p0 context.Context) (*retrievalmarket.Ask, error) { - if s.Internal.MarketGetRetrievalAsk == nil { - return nil, ErrNotSupported - } - return s.Internal.MarketGetRetrievalAsk(p0) -} - -func (s *BoostStub) MarketGetRetrievalAsk(p0 context.Context) (*retrievalmarket.Ask, error) { - return nil, ErrNotSupported -} - -func (s *BoostStruct) MarketImportDealData(p0 context.Context, p1 cid.Cid, p2 string) error { - if s.Internal.MarketImportDealData == nil { - return ErrNotSupported - } - return s.Internal.MarketImportDealData(p0, p1, p2) -} - -func (s *BoostStub) MarketImportDealData(p0 context.Context, p1 cid.Cid, p2 string) error { - return ErrNotSupported -} - -func (s *BoostStruct) MarketListDataTransfers(p0 context.Context) ([]DataTransferChannel, error) { - if s.Internal.MarketListDataTransfers == nil { - return *new([]DataTransferChannel), ErrNotSupported - } - return s.Internal.MarketListDataTransfers(p0) -} - -func (s *BoostStub) MarketListDataTransfers(p0 context.Context) ([]DataTransferChannel, error) { - return *new([]DataTransferChannel), ErrNotSupported -} - -func (s *BoostStruct) MarketListIncompleteDeals(p0 context.Context) ([]storagemarket.MinerDeal, error) { - if s.Internal.MarketListIncompleteDeals == nil { - return *new([]storagemarket.MinerDeal), ErrNotSupported - } - return s.Internal.MarketListIncompleteDeals(p0) -} - -func (s *BoostStub) MarketListIncompleteDeals(p0 context.Context) ([]storagemarket.MinerDeal, error) { - return *new([]storagemarket.MinerDeal), ErrNotSupported -} - -func (s *BoostStruct) MarketListRetrievalDeals(p0 context.Context) ([]retrievalmarket.ProviderDealState, error) { - if s.Internal.MarketListRetrievalDeals == nil { - return *new([]retrievalmarket.ProviderDealState), ErrNotSupported - } - return s.Internal.MarketListRetrievalDeals(p0) -} - -func (s *BoostStub) MarketListRetrievalDeals(p0 context.Context) ([]retrievalmarket.ProviderDealState, error) { - return *new([]retrievalmarket.ProviderDealState), ErrNotSupported -} - -func (s *BoostStruct) MarketPendingDeals(p0 context.Context) (lapi.PendingDealInfo, error) { - if s.Internal.MarketPendingDeals == nil { - return *new(lapi.PendingDealInfo), ErrNotSupported - } - return s.Internal.MarketPendingDeals(p0) -} - -func (s 
*BoostStub) MarketPendingDeals(p0 context.Context) (lapi.PendingDealInfo, error) { - return *new(lapi.PendingDealInfo), ErrNotSupported -} - -func (s *BoostStruct) MarketRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { - if s.Internal.MarketRestartDataTransfer == nil { - return ErrNotSupported - } - return s.Internal.MarketRestartDataTransfer(p0, p1, p2, p3) -} - -func (s *BoostStub) MarketRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { - return ErrNotSupported -} - -func (s *BoostStruct) MarketSetAsk(p0 context.Context, p1 types.BigInt, p2 types.BigInt, p3 abi.ChainEpoch, p4 abi.PaddedPieceSize, p5 abi.PaddedPieceSize) error { - if s.Internal.MarketSetAsk == nil { - return ErrNotSupported - } - return s.Internal.MarketSetAsk(p0, p1, p2, p3, p4, p5) -} - -func (s *BoostStub) MarketSetAsk(p0 context.Context, p1 types.BigInt, p2 types.BigInt, p3 abi.ChainEpoch, p4 abi.PaddedPieceSize, p5 abi.PaddedPieceSize) error { - return ErrNotSupported -} - -func (s *BoostStruct) MarketSetRetrievalAsk(p0 context.Context, p1 *retrievalmarket.Ask) error { - if s.Internal.MarketSetRetrievalAsk == nil { - return ErrNotSupported - } - return s.Internal.MarketSetRetrievalAsk(p0, p1) -} - -func (s *BoostStub) MarketSetRetrievalAsk(p0 context.Context, p1 *retrievalmarket.Ask) error { - return ErrNotSupported -} - func (s *BoostStruct) OnlineBackup(p0 context.Context, p1 string) error { if s.Internal.OnlineBackup == nil { return ErrNotSupported @@ -804,28 +522,6 @@ func (s *BoostStub) PdBuildIndexForPieceCid(p0 context.Context, p1 cid.Cid) erro return ErrNotSupported } -func (s *BoostStruct) RuntimeSubsystems(p0 context.Context) (lapi.MinerSubsystems, error) { - if s.Internal.RuntimeSubsystems == nil { - return *new(lapi.MinerSubsystems), ErrNotSupported - } - return s.Internal.RuntimeSubsystems(p0) -} - -func (s *BoostStub) RuntimeSubsystems(p0 context.Context) (lapi.MinerSubsystems, error) { - return *new(lapi.MinerSubsystems), ErrNotSupported -} - -func (s *BoostStruct) SectorsRefs(p0 context.Context) (map[string][]lapi.SealedRef, error) { - if s.Internal.SectorsRefs == nil { - return *new(map[string][]lapi.SealedRef), ErrNotSupported - } - return s.Internal.SectorsRefs(p0) -} - -func (s *BoostStub) SectorsRefs(p0 context.Context) (map[string][]lapi.SealedRef, error) { - return *new(map[string][]lapi.SealedRef), ErrNotSupported -} - func (s *ChainIOStruct) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) { if s.Internal.ChainHasObj == nil { return false, ErrNotSupported diff --git a/api/types.go b/api/types.go index eaa5865f3..93558f324 100644 --- a/api/types.go +++ b/api/types.go @@ -2,13 +2,10 @@ package api import ( "encoding/json" - "fmt" "time" - "github.com/filecoin-project/boost-gfm/retrievalmarket" "github.com/filecoin-project/lotus/chain/types" - datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/filecoin-project/go-state-types/abi" "github.com/ipfs/go-cid" @@ -61,51 +58,6 @@ type MessageSendSpec struct { MaxFee abi.TokenAmount } -type DataTransferChannel struct { - TransferID datatransfer.TransferID - Status datatransfer.Status - BaseCID cid.Cid - IsInitiator bool - IsSender bool - Voucher string - Message string - OtherPeer peer.ID - Transferred uint64 - Stages *datatransfer.ChannelStages -} - -// NewDataTransferChannel constructs an API DataTransferChannel type from full channel state snapshot and a host id -func NewDataTransferChannel(hostID peer.ID, channelState 
datatransfer.ChannelState) DataTransferChannel { - channel := DataTransferChannel{ - TransferID: channelState.TransferID(), - Status: channelState.Status(), - BaseCID: channelState.BaseCID(), - IsSender: channelState.Sender() == hostID, - Message: channelState.Message(), - } - stringer, ok := channelState.Voucher().(fmt.Stringer) - if ok { - channel.Voucher = stringer.String() - } else { - voucherJSON, err := json.Marshal(channelState.Voucher()) - if err != nil { - channel.Voucher = fmt.Errorf("Voucher Serialization: %w", err).Error() - } else { - channel.Voucher = string(voucherJSON) - } - } - if channel.IsSender { - channel.IsInitiator = !channelState.IsPull() - channel.Transferred = channelState.Sent() - channel.OtherPeer = channelState.Recipient() - } else { - channel.IsInitiator = channelState.IsPull() - channel.Transferred = channelState.Received() - channel.OtherPeer = channelState.Sender() - } - return channel -} - type ExtendedPeerInfo struct { ID peer.ID Agent string @@ -179,24 +131,6 @@ type MessagePrototype struct { ValidNonce bool } -type RetrievalInfo struct { - PayloadCID cid.Cid - ID retrievalmarket.DealID - PieceCID *cid.Cid - PricePerByte abi.TokenAmount - UnsealPrice abi.TokenAmount - - Status retrievalmarket.DealStatus - Message string // more information about deal state, particularly errors - Provider peer.ID - BytesReceived uint64 - BytesPaidFor uint64 - TotalPaid abi.TokenAmount - - TransferChannelID *datatransfer.ChannelID - DataTransfer *DataTransferChannel -} - type SealingPipelineState struct { TaskJobsCount map[string]int MaxWaitDealsSectors uint64 diff --git a/cmd/boost/provider_cmd.go b/cmd/boost/provider_cmd.go index efef74db9..cb3732cfd 100644 --- a/cmd/boost/provider_cmd.go +++ b/cmd/boost/provider_cmd.go @@ -5,17 +5,15 @@ import ( "sort" "strings" - "github.com/filecoin-project/boost-gfm/retrievalmarket" - "github.com/filecoin-project/boost-gfm/storagemarket/network" bcli "github.com/filecoin-project/boost/cli" clinode "github.com/filecoin-project/boost/cli/node" "github.com/filecoin-project/boost/cmd" "github.com/filecoin-project/boost/retrievalmarket/lp2pimpl" + "github.com/filecoin-project/boost/storagemarket/types/legacytypes/network" "github.com/filecoin-project/boostd-data/shared/cliutil" "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" - "github.com/ipfs/go-cid" "github.com/ipni/go-libipni/maurl" "github.com/multiformats/go-multiaddr" "github.com/urfave/cli/v2" @@ -32,7 +30,6 @@ var providerCmd = &cli.Command{ Subcommands: []*cli.Command{ libp2pInfoCmd, storageAskCmd, - retrievalAskCmd, retrievalTransportsCmd, }, } @@ -209,100 +206,6 @@ var storageAskCmd = &cli.Command{ }, } -var retrievalAskCmd = &cli.Command{ - Name: "retrieval-ask", - Usage: "Query a storage provider's retrieval ask", - ArgsUsage: "[provider] [data CID]", - Flags: []cli.Flag{ - &cli.Int64Flag{ - Name: "size", - Usage: "data size in bytes", - }, - }, - Action: func(cctx *cli.Context) error { - ctx := bcli.ReqContext(cctx) - - afmt := NewAppFmt(cctx.App) - if cctx.NArg() != 2 { - afmt.Println("Usage: retrieval-ask [provider] [data CID]") - return nil - } - - n, err := clinode.Setup(cctx.String(cmd.FlagRepo.Name)) - if err != nil { - return err - } - - api, closer, err := lcli.GetGatewayAPI(cctx) - if err != nil { - return fmt.Errorf("cant setup gateway connection: %w", err) - } - defer closer() - - maddr, err := address.NewFromString(cctx.Args().First()) - if err != nil { - return err - } - - 
dataCid, err := cid.Parse(cctx.Args().Get(1)) - if err != nil { - return fmt.Errorf("parsing data cid: %w", err) - } - - addrInfo, err := cmd.GetAddrInfo(ctx, api, maddr) - if err != nil { - return err - } - - log.Debugw("found storage provider", "id", addrInfo.ID, "multiaddrs", addrInfo.Addrs, "addr", maddr) - - if err := n.Host.Connect(ctx, *addrInfo); err != nil { - return fmt.Errorf("failed to connect to peer %s: %w", addrInfo.ID, err) - } - - s, err := n.Host.NewStream(ctx, addrInfo.ID, QueryProtocolID) - if err != nil { - return fmt.Errorf("failed to open stream to peer %s: %w", addrInfo.ID, err) - } - defer s.Close() - - req := retrievalmarket.Query{ - PayloadCID: dataCid, - QueryParams: retrievalmarket.QueryParams{}, - } - - var ask retrievalmarket.QueryResponse - - if err := doRpc(ctx, s, &req, &ask); err != nil { - return fmt.Errorf("send retrieval-ask request rpc: %w", err) - } - - afmt.Printf("Status: %d\n", ask.Status) - if ask.Status != 0 { - return nil - } - afmt.Printf("Ask: %s\n", maddr) - afmt.Printf("Unseal price: %s\n", types.FIL(ask.UnsealPrice)) - afmt.Printf("Price per byte: %s\n", types.FIL(ask.MinPricePerByte)) - afmt.Printf("Payment interval: %s\n", types.SizeStr(types.NewInt(ask.MaxPaymentInterval))) - afmt.Printf("Payment interval increase: %s\n", types.SizeStr(types.NewInt(ask.MaxPaymentIntervalIncrease))) - - size := cctx.Uint64("size") - if size == 0 { - if ask.Size == 0 { - return nil - } - size = ask.Size - afmt.Printf("Size: %s\n", types.SizeStr(types.NewInt(ask.Size))) - } - transferPrice := types.BigMul(ask.MinPricePerByte, types.NewInt(size)) - totalPrice := types.BigAdd(ask.UnsealPrice, transferPrice) - afmt.Printf("Total price for %d bytes: %s\n", size, types.FIL(totalPrice)) - - return nil - }, -} - var retrievalTransportsCmd = &cli.Command{ Name: "retrieval-transports", Usage: "Query a storage provider's available retrieval transports (libp2p, http, etc)", diff --git a/cmd/boostd/import_data.go b/cmd/boostd/import_data.go index bed8bcb33..35adfa617 100644 --- a/cmd/boostd/import_data.go +++ b/cmd/boostd/import_data.go @@ -77,19 +77,7 @@ var importDataCmd = &cli.Command{ return err } - if deleteAfterImport { - return fmt.Errorf("cannot find boost deal with proposal cid %s and legacy deal data cannot be automatically deleted after import (only new deals)", proposalCid) - } - - // The deal is not in the boost database, try the legacy - // markets datastore (v1.1.0 deal) - err := napi.MarketImportDealData(cctx.Context, *proposalCid, filePath) - if err != nil { - return fmt.Errorf("couldnt import v1.1.0 deal, or find boost deal: %w", err) - } - - fmt.Printf("Offline deal import for v1.1.0 deal %s scheduled for execution\n", proposalCid.String()) - return nil + return fmt.Errorf("cannot find boost deal with proposal cid %s and legacy deals are no longer supported", proposalCid) } // Get the deal UUID from the deal diff --git a/cmd/boostd/legacy_data_transfers.go b/cmd/boostd/legacy_data_transfers.go deleted file mode 100644 index 425417140..000000000 --- a/cmd/boostd/legacy_data_transfers.go +++ /dev/null @@ -1,325 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "errors" - "fmt" - bapi "github.com/filecoin-project/boost/api" - datatransferv2 "github.com/filecoin-project/go-data-transfer/v2" - "github.com/filecoin-project/lotus/api" - "os" - "strconv" - "time" - - tm "github.com/buger/goterm" - bcli "github.com/filecoin-project/boost/cli" - datatransfer "github.com/filecoin-project/go-data-transfer" - lcli 
"github.com/filecoin-project/lotus/cli" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/urfave/cli/v2" -) - -var dataTransfersCmd = &cli.Command{ - Name: "data-transfers", - Usage: "Manage legacy data transfers (Markets V1)", - Category: "legacy", - Subcommands: []*cli.Command{ - transfersListCmd, - marketRestartTransfer, - marketCancelTransfer, - transfersDiagnosticsCmd, - }, -} - -var marketRestartTransfer = &cli.Command{ - Name: "restart", - Usage: "Force restart a stalled data transfer", - ArgsUsage: "", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "peerid", - Usage: "narrow to transfer with specific peer", - }, - &cli.BoolFlag{ - Name: "initiator", - Usage: "specify only transfers where peer is/is not initiator", - Value: false, - }, - }, - Action: func(cctx *cli.Context) error { - if !cctx.Args().Present() { - return cli.ShowCommandHelp(cctx, cctx.Command.Name) - } - nodeApi, closer, err := bcli.GetBoostAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := lcli.ReqContext(cctx) - - transferUint, err := strconv.ParseUint(cctx.Args().First(), 10, 64) - if err != nil { - return fmt.Errorf("Error reading transfer ID: %w", err) - } - transferID := datatransfer.TransferID(transferUint) - initiator := cctx.Bool("initiator") - var other peer.ID - if pidstr := cctx.String("peerid"); pidstr != "" { - p, err := peer.Decode(pidstr) - if err != nil { - return err - } - other = p - } else { - channels, err := nodeApi.MarketListDataTransfers(ctx) - if err != nil { - return err - } - found := false - for _, channel := range channels { - if channel.IsInitiator == initiator && channel.TransferID == transferID { - other = channel.OtherPeer - found = true - break - } - } - if !found { - return errors.New("unable to find matching data transfer") - } - } - - return nodeApi.MarketRestartDataTransfer(ctx, transferID, other, initiator) - }, -} - -var marketCancelTransfer = &cli.Command{ - Name: "cancel", - Usage: "Force cancel a data transfer", - ArgsUsage: "", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "peerid", - Usage: "narrow to transfer with specific peer", - }, - &cli.BoolFlag{ - Name: "initiator", - Usage: "specify only transfers where peer is/is not initiator", - Value: false, - }, - &cli.DurationFlag{ - Name: "cancel-timeout", - Usage: "time to wait for cancel to be sent to client", - Value: 5 * time.Second, - }, - }, - Action: func(cctx *cli.Context) error { - if !cctx.Args().Present() { - return cli.ShowCommandHelp(cctx, cctx.Command.Name) - } - nodeApi, closer, err := bcli.GetBoostAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := lcli.ReqContext(cctx) - - transferUint, err := strconv.ParseUint(cctx.Args().First(), 10, 64) - if err != nil { - return fmt.Errorf("Error reading transfer ID: %w", err) - } - transferID := datatransfer.TransferID(transferUint) - initiator := cctx.Bool("initiator") - var other peer.ID - if pidstr := cctx.String("peerid"); pidstr != "" { - p, err := peer.Decode(pidstr) - if err != nil { - return err - } - other = p - } else { - channels, err := nodeApi.MarketListDataTransfers(ctx) - if err != nil { - return err - } - found := false - for _, channel := range channels { - if channel.IsInitiator == initiator && channel.TransferID == transferID { - other = channel.OtherPeer - found = true - break - } - } - if !found { - return errors.New("unable to find matching data transfer") - } - } - - timeoutCtx, cancel := context.WithTimeout(ctx, cctx.Duration("cancel-timeout")) - defer cancel() - return 
nodeApi.MarketCancelDataTransfer(timeoutCtx, transferID, other, initiator) - }, -} - -var transfersListCmd = &cli.Command{ - Name: "list", - Usage: "List ongoing data transfers for this miner", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "verbose", - Aliases: []string{"v"}, - Usage: "print verbose transfer details", - }, - &cli.BoolFlag{ - Name: "completed", - Usage: "show completed data transfers", - }, - &cli.BoolFlag{ - Name: "watch", - Usage: "watch deal updates in real-time, rather than a one time list", - }, - &cli.BoolFlag{ - Name: "show-failed", - Usage: "show failed/cancelled transfers", - }, - }, - Action: func(cctx *cli.Context) error { - api, closer, err := bcli.GetBoostAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := lcli.ReqContext(cctx) - - channels, err := api.MarketListDataTransfers(ctx) - if err != nil { - return err - } - - verbose := cctx.Bool("verbose") - completed := cctx.Bool("completed") - watch := cctx.Bool("watch") - showFailed := cctx.Bool("show-failed") - if watch { - channelUpdates, err := api.MarketDataTransferUpdates(ctx) - if err != nil { - return err - } - - for { - tm.Clear() // Clear current screen - - tm.MoveCursor(1, 1) - - lcli.OutputDataTransferChannels(tm.Screen, toDTv2Channels(channels), verbose, completed, showFailed) - - tm.Flush() - - select { - case <-ctx.Done(): - return nil - case channelUpdate := <-channelUpdates: - var found bool - for i, existing := range channels { - if existing.TransferID == channelUpdate.TransferID && - existing.OtherPeer == channelUpdate.OtherPeer && - existing.IsSender == channelUpdate.IsSender && - existing.IsInitiator == channelUpdate.IsInitiator { - channels[i] = channelUpdate - found = true - break - } - } - if !found { - channels = append(channels, channelUpdate) - } - } - } - } - lcli.OutputDataTransferChannels(os.Stdout, toDTv2Channels(channels), verbose, completed, showFailed) - return nil - }, -} - -func toDTv2Channels(channels []bapi.DataTransferChannel) []api.DataTransferChannel { - v2chs := make([]api.DataTransferChannel, 0, len(channels)) - for _, ch := range channels { - v2chs = append(v2chs, api.DataTransferChannel{ - TransferID: datatransferv2.TransferID(ch.TransferID), - Status: datatransferv2.Status(ch.Status), - BaseCID: ch.BaseCID, - IsInitiator: ch.IsInitiator, - IsSender: ch.IsSender, - Voucher: ch.Voucher, - Message: ch.Message, - OtherPeer: ch.OtherPeer, - Transferred: ch.Transferred, - Stages: toDTv2Stages(ch.Stages), - }) - } - return v2chs -} - -func toDTv2Stages(stages *datatransfer.ChannelStages) *datatransferv2.ChannelStages { - if stages == nil { - return nil - } - - v2stgs := make([]*datatransferv2.ChannelStage, 0, len(stages.Stages)) - for _, s := range stages.Stages { - v2stgs = append(v2stgs, &datatransferv2.ChannelStage{ - Name: s.Name, - Description: s.Description, - CreatedTime: s.CreatedTime, - UpdatedTime: s.UpdatedTime, - Logs: toDTv2Logs(s.Logs), - }) - } - return &datatransferv2.ChannelStages{Stages: v2stgs} -} - -func toDTv2Logs(logs []*datatransfer.Log) []*datatransferv2.Log { - if logs == nil { - return nil - } - - v2logs := make([]*datatransferv2.Log, 0, len(logs)) - for _, l := range logs { - v2logs = append(v2logs, &datatransferv2.Log{Log: l.Log, UpdatedTime: l.UpdatedTime}) - } - - return v2logs -} - -var transfersDiagnosticsCmd = &cli.Command{ - Name: "diagnostics", - Usage: "Get detailed diagnostics on active transfers with a specific peer", - ArgsUsage: "", - Flags: []cli.Flag{}, - Action: func(cctx *cli.Context) error { - if 
!cctx.Args().Present() { - return cli.ShowCommandHelp(cctx, cctx.Command.Name) - } - api, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := lcli.ReqContext(cctx) - - targetPeer, err := peer.Decode(cctx.Args().First()) - if err != nil { - return err - } - diagnostics, err := api.MarketDataTransferDiagnostics(ctx, targetPeer) - if err != nil { - return err - } - out, err := json.MarshalIndent(diagnostics, "", "\t") - if err != nil { - return err - } - fmt.Println(string(out)) - return nil - }, -} diff --git a/cmd/boostd/legacy_retrieval_deals.go b/cmd/boostd/legacy_retrieval_deals.go deleted file mode 100644 index 7dca34ae2..000000000 --- a/cmd/boostd/legacy_retrieval_deals.go +++ /dev/null @@ -1,277 +0,0 @@ -package main - -import ( - "fmt" - "os" - "sort" - "text/tabwriter" - - "github.com/docker/go-units" - "github.com/filecoin-project/boost-gfm/retrievalmarket" - "github.com/filecoin-project/go-state-types/abi" - "github.com/urfave/cli/v2" - - bcli "github.com/filecoin-project/boost/cli" - "github.com/filecoin-project/lotus/chain/types" - lcli "github.com/filecoin-project/lotus/cli" -) - -var retrievalDealsCmd = &cli.Command{ - Name: "retrieval-deals", - Usage: "Manage legacy retrieval deals and related configuration (Markets V1)", - Category: "legacy", - Subcommands: []*cli.Command{ - retrievalDealSelectionCmd, - retrievalDealsListCmd, - retrievalSetAskCmd, - retrievalGetAskCmd, - }, -} - -var retrievalDealSelectionCmd = &cli.Command{ - Name: "selection", - Usage: "Configure acceptance criteria for retrieval deal proposals", - Subcommands: []*cli.Command{ - retrievalDealSelectionShowCmd, - retrievalDealSelectionResetCmd, - retrievalDealSelectionRejectCmd, - }, -} - -var retrievalDealSelectionShowCmd = &cli.Command{ - Name: "list", - Usage: "List retrieval deal proposal selection criteria", - Action: func(cctx *cli.Context) error { - smapi, closer, err := bcli.GetBoostAPI(cctx) - if err != nil { - return err - } - defer closer() - - onlineOk, err := smapi.DealsConsiderOnlineRetrievalDeals(lcli.DaemonContext(cctx)) - if err != nil { - return err - } - - offlineOk, err := smapi.DealsConsiderOfflineRetrievalDeals(lcli.DaemonContext(cctx)) - if err != nil { - return err - } - - fmt.Printf("considering online retrieval deals: %t\n", onlineOk) - fmt.Printf("considering offline retrieval deals: %t\n", offlineOk) - - return nil - }, -} - -var retrievalDealSelectionResetCmd = &cli.Command{ - Name: "reset", - Usage: "Reset retrieval deal proposal selection criteria to default values", - Action: func(cctx *cli.Context) error { - smapi, closer, err := bcli.GetBoostAPI(cctx) - if err != nil { - return err - } - defer closer() - - err = smapi.DealsSetConsiderOnlineRetrievalDeals(lcli.DaemonContext(cctx), true) - if err != nil { - return err - } - - err = smapi.DealsSetConsiderOfflineRetrievalDeals(lcli.DaemonContext(cctx), true) - if err != nil { - return err - } - - return nil - }, -} - -var retrievalDealSelectionRejectCmd = &cli.Command{ - Name: "reject", - Usage: "Configure criteria which necessitate automatic rejection", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "online", - }, - &cli.BoolFlag{ - Name: "offline", - }, - }, - Action: func(cctx *cli.Context) error { - smapi, closer, err := bcli.GetBoostAPI(cctx) - if err != nil { - return err - } - defer closer() - - if cctx.Bool("online") { - err = smapi.DealsSetConsiderOnlineRetrievalDeals(lcli.DaemonContext(cctx), false) - if err != nil { - return err - } - } - - if cctx.Bool("offline") { - 
err = smapi.DealsSetConsiderOfflineRetrievalDeals(lcli.DaemonContext(cctx), false) - if err != nil { - return err - } - } - - return nil - }, -} - -var retrievalDealsListCmd = &cli.Command{ - Name: "list", - Usage: "List all active retrieval deals for this miner", - Action: func(cctx *cli.Context) error { - api, closer, err := bcli.GetBoostAPI(cctx) - if err != nil { - return err - } - defer closer() - - deals, err := api.MarketListRetrievalDeals(lcli.DaemonContext(cctx)) - if err != nil { - return err - } - - sort.Slice(deals, func(i, j int) bool { - return deals[i].ID < deals[j].ID - }) - - w := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0) - - _, _ = fmt.Fprintf(w, "Receiver\tDealID\tPayload\tState\tPricePerByte\tBytesSent\tMessage\n") - - for _, deal := range deals { - payloadCid := deal.PayloadCID.String() - - _, _ = fmt.Fprintf(w, - "%s\t%d\t%s\t%s\t%s\t%d\t%s\n", - deal.Receiver.String(), - deal.ID, - "..."+payloadCid[len(payloadCid)-8:], - retrievalmarket.DealStatuses[deal.Status], - deal.PricePerByte.String(), - deal.TotalSent, - deal.Message, - ) - } - - return w.Flush() - }, -} - -var retrievalSetAskCmd = &cli.Command{ - Name: "set-ask", - Usage: "Configure the provider's retrieval ask", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "price", - Usage: "Set the price of the ask for retrievals (FIL/GiB)", - }, - &cli.StringFlag{ - Name: "unseal-price", - Usage: "Set the price to unseal", - }, - &cli.StringFlag{ - Name: "payment-interval", - Usage: "Set the payment interval (in bytes) for retrieval", - DefaultText: "1MiB", - }, - &cli.StringFlag{ - Name: "payment-interval-increase", - Usage: "Set the payment interval increase (in bytes) for retrieval", - DefaultText: "1MiB", - }, - }, - Action: func(cctx *cli.Context) error { - ctx := lcli.ReqContext(cctx) - - api, closer, err := bcli.GetBoostAPI(cctx) - if err != nil { - return err - } - defer closer() - - ask, err := api.MarketGetRetrievalAsk(ctx) - if err != nil { - return err - } - - if cctx.IsSet("price") { - v, err := types.ParseFIL(cctx.String("price")) - if err != nil { - return err - } - ask.PricePerByte = types.BigDiv(types.BigInt(v), types.NewInt(1<<30)) - } - - if cctx.IsSet("unseal-price") { - v, err := types.ParseFIL(cctx.String("unseal-price")) - if err != nil { - return err - } - ask.UnsealPrice = abi.TokenAmount(v) - } - - if cctx.IsSet("payment-interval") { - v, err := units.RAMInBytes(cctx.String("payment-interval")) - if err != nil { - return err - } - ask.PaymentInterval = uint64(v) - } - - if cctx.IsSet("payment-interval-increase") { - v, err := units.RAMInBytes(cctx.String("payment-interval-increase")) - if err != nil { - return err - } - ask.PaymentIntervalIncrease = uint64(v) - } - - return api.MarketSetRetrievalAsk(ctx, ask) - }, -} - -var retrievalGetAskCmd = &cli.Command{ - Name: "get-ask", - Usage: "Get the provider's current retrieval ask configured by the provider in the ask-store using the set-ask CLI command", - Flags: []cli.Flag{}, - Action: func(cctx *cli.Context) error { - ctx := lcli.ReqContext(cctx) - - api, closer, err := bcli.GetBoostAPI(cctx) - if err != nil { - return err - } - defer closer() - - ask, err := api.MarketGetRetrievalAsk(ctx) - if err != nil { - return err - } - - w := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0) - _, _ = fmt.Fprintf(w, "Price per Byte\tUnseal Price\tPayment Interval\tPayment Interval Increase\n") - if ask == nil { - _, _ = fmt.Fprintf(w, "\n") - return w.Flush() - } - - _, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", - types.FIL(ask.PricePerByte), - 
types.FIL(ask.UnsealPrice), - units.BytesSize(float64(ask.PaymentInterval)), - units.BytesSize(float64(ask.PaymentIntervalIncrease)), - ) - return w.Flush() - - }, -} diff --git a/cmd/boostd/recover.go b/cmd/boostd/recover.go index 04b155827..96e32bc70 100644 --- a/cmd/boostd/recover.go +++ b/cmd/boostd/recover.go @@ -13,9 +13,9 @@ import ( "time" "github.com/davecgh/go-spew/spew" - "github.com/filecoin-project/boost-gfm/piecestore" "github.com/filecoin-project/boost/cmd/lib" "github.com/filecoin-project/boost/db" + "github.com/filecoin-project/boost/markets/piecestore" "github.com/filecoin-project/boost/piecedirectory" bdclient "github.com/filecoin-project/boostd-data/client" "github.com/filecoin-project/boostd-data/model" diff --git a/cmd/booster-http/server.go b/cmd/booster-http/server.go index e02d3b5d4..1dbe887f2 100644 --- a/cmd/booster-http/server.go +++ b/cmd/booster-http/server.go @@ -13,7 +13,6 @@ import ( "github.com/NYTimes/gziphandler" "github.com/fatih/color" - "github.com/filecoin-project/boost-gfm/retrievalmarket" "github.com/filecoin-project/boost/metrics" "github.com/filecoin-project/boostd-data/model" "github.com/filecoin-project/boostd-data/shared/tracing" @@ -272,9 +271,6 @@ func isNotFoundError(err error) bool { if errors.Is(err, datastore.ErrNotFound) { return true } - if errors.Is(err, retrievalmarket.ErrNotFound) { - return true - } return strings.Contains(strings.ToLower(err.Error()), "not found") } diff --git a/cmd/boostx/stats_cmd.go b/cmd/boostx/stats_cmd.go index 1b8775955..9745cee3a 100644 --- a/cmd/boostx/stats_cmd.go +++ b/cmd/boostx/stats_cmd.go @@ -8,7 +8,7 @@ import ( "sync" "github.com/filecoin-project/boost/retrievalmarket/lp2pimpl" - transports_types "github.com/filecoin-project/boost/retrievalmarket/types" + transports_types "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" clinode "github.com/filecoin-project/boost/cli/node" "github.com/filecoin-project/boost/cmd" diff --git a/cmd/boostx/utils_cmd.go b/cmd/boostx/utils_cmd.go index 597b8f90b..f64fa9ec5 100644 --- a/cmd/boostx/utils_cmd.go +++ b/cmd/boostx/utils_cmd.go @@ -9,9 +9,9 @@ import ( "path/filepath" "time" - "github.com/filecoin-project/boost-gfm/stores" clinode "github.com/filecoin-project/boost/cli/node" "github.com/filecoin-project/boost/cmd" + "github.com/filecoin-project/boost/cmd/lib/stores" "github.com/filecoin-project/boost/node/config" "github.com/filecoin-project/boost/node/repo" "github.com/filecoin-project/boost/testutil" diff --git a/cmd/lib/common.go b/cmd/lib/common.go index d15386a95..46b5f07c2 100644 --- a/cmd/lib/common.go +++ b/cmd/lib/common.go @@ -5,9 +5,9 @@ import ( "errors" "fmt" - "github.com/filecoin-project/boost-gfm/piecestore" - piecestoreimpl "github.com/filecoin-project/boost-gfm/piecestore/impl" - "github.com/filecoin-project/boost-gfm/storagemarket" + "github.com/filecoin-project/boost/markets/piecestore" + "github.com/filecoin-project/boost/markets/piecestore/impl" + "github.com/filecoin-project/boost/storagemarket/types/legacytypes" vfsm "github.com/filecoin-project/go-ds-versioning/pkg/fsm" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-statemachine/fsm" @@ -59,7 +59,7 @@ func GetPropCidByChainDealID(ctx context.Context, ds *backupds.Datastore) (map[a } // Build a mapping of chain deal ID to proposal CID - var list []storagemarket.MinerDeal + var list []legacytypes.MinerDeal if err := deals.List(&list); err != nil { return nil, err } @@ -108,7 +108,7 @@ func getLegacyDealsFSM(ctx 
context.Context, ds *backupds.Datastore) (fsm.Group, // Get the deals FSM provDS := namespace.Wrap(ds, datastore.NewKey("/deals/provider")) deals, migrate, err := vfsm.NewVersionedFSM(provDS, fsm.Parameters{ - StateType: storagemarket.MinerDeal{}, + StateType: legacytypes.MinerDeal{}, StateKeyField: "State", }, nil, "2") if err != nil { diff --git a/cmd/lib/stores/dagstore.go b/cmd/lib/stores/dagstore.go new file mode 100644 index 000000000..ffdb4204f --- /dev/null +++ b/cmd/lib/stores/dagstore.go @@ -0,0 +1,86 @@ +package stores + +import ( + "context" + "io" + + "github.com/filecoin-project/boost/storagemarket/types/legacytypes" + bstore "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/go-cid" + carindex "github.com/ipld/go-car/v2/index" + + "github.com/filecoin-project/dagstore" +) + +type ClosableBlockstore interface { + bstore.Blockstore + io.Closer +} + +// DAGStoreWrapper hides the details of the DAG store implementation from +// the other parts of go-fil-markets. +type DAGStoreWrapper interface { + // RegisterShard loads a CAR file into the DAG store and builds an + // index for it, sending the result on the supplied channel on completion + RegisterShard(ctx context.Context, pieceCid cid.Cid, carPath string, eagerInit bool, resch chan dagstore.ShardResult) error + + // LoadShard fetches the data for a shard and provides a blockstore + // interface to it. + // + // The blockstore must be closed to release the shard. + LoadShard(ctx context.Context, pieceCid cid.Cid) (ClosableBlockstore, error) + + // MigrateDeals migrates the supplied storage deals into the DAG store. + MigrateDeals(ctx context.Context, deals []legacytypes.MinerDeal) (bool, error) + + // GetPiecesContainingBlock returns the CID of all pieces that contain + // the block with the given CID + GetPiecesContainingBlock(blockCID cid.Cid) ([]cid.Cid, error) + + GetIterableIndexForPiece(pieceCid cid.Cid) (carindex.IterableIndex, error) + + // DestroyShard initiates the registration of a new shard. + // + // This method returns an error synchronously if preliminary validation fails. + // Otherwise, it queues the shard for destruction. The caller should monitor + // supplied channel for a result. + DestroyShard(ctx context.Context, pieceCid cid.Cid, resch chan dagstore.ShardResult) error + + // Close closes the dag store wrapper. + Close() error +} + +// RegisterShardSync calls the DAGStore RegisterShard method and waits +// synchronously in a dedicated channel until the registration has completed +// fully. +func RegisterShardSync(ctx context.Context, ds DAGStoreWrapper, pieceCid cid.Cid, carPath string, eagerInit bool) error { + resch := make(chan dagstore.ShardResult, 1) + if err := ds.RegisterShard(ctx, pieceCid, carPath, eagerInit, resch); err != nil { + return err + } + + // TODO: Can I rely on RegisterShard to return an error if the context times out? + select { + case <-ctx.Done(): + return ctx.Err() + case res := <-resch: + return res.Error + } +} + +// DestroyShardSync calls the DAGStore DestroyShard method and waits +// synchronously in a dedicated channel until the shard has been destroyed completely. 
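For orientation, a minimal usage sketch of the two synchronous helpers (illustrative only, not part of the patch; the function name and arguments are placeholders, and it assumes any concrete DAGStoreWrapper implementation):

func registerThenDestroySketch(ctx context.Context, wrapper DAGStoreWrapper, pieceCid cid.Cid, carPath string) error {
    // Block until the shard is registered and indexed, or the context ends.
    if err := RegisterShardSync(ctx, wrapper, pieceCid, carPath, false /* eagerInit */); err != nil {
        return err
    }
    // Later, tear the shard down again, waiting for the result in the same way.
    return DestroyShardSync(ctx, wrapper, pieceCid)
}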
+func DestroyShardSync(ctx context.Context, ds DAGStoreWrapper, pieceCid cid.Cid) error { + resch := make(chan dagstore.ShardResult, 1) + + if err := ds.DestroyShard(ctx, pieceCid, resch); err != nil { + return err + } + + select { + case <-ctx.Done(): + return ctx.Err() + case res := <-resch: + return res.Error + } +} diff --git a/cmd/lib/stores/error.go b/cmd/lib/stores/error.go new file mode 100644 index 000000000..cc9a4767e --- /dev/null +++ b/cmd/lib/stores/error.go @@ -0,0 +1,9 @@ +package stores + +import "golang.org/x/xerrors" + +var ErrNotFound = xerrors.New("not found") + +func IsNotFound(err error) bool { + return xerrors.Is(err, ErrNotFound) +} diff --git a/cmd/lib/stores/filestore.go b/cmd/lib/stores/filestore.go new file mode 100644 index 000000000..31a324741 --- /dev/null +++ b/cmd/lib/stores/filestore.go @@ -0,0 +1,164 @@ +package stores + +import ( + "context" + + bstore "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/filestore" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/query" + carv2 "github.com/ipld/go-car/v2" + "github.com/ipld/go-car/v2/blockstore" + mh "github.com/multiformats/go-multihash" + "golang.org/x/xerrors" +) + +// ReadOnlyFilestore opens the CAR in the specified path as a read-only +// blockstore, and fronts it with a Filestore whose positional mappings are +// stored inside the CAR itself. It must be closed when done. +func ReadOnlyFilestore(path string) (ClosableBlockstore, error) { + ro, err := OpenReadOnly(path, + carv2.ZeroLengthSectionAsEOF(true), + blockstore.UseWholeCIDs(true), + ) + + if err != nil { + return nil, err + } + + bs, err := FilestoreOf(ro) + if err != nil { + return nil, err + } + + return &closableBlockstore{Blockstore: bs, closeFn: ro.Close}, nil +} + +// ReadWriteFilestore opens the CAR in the specified path as a read-write +// blockstore, and fronts it with a Filestore whose positional mappings are +// stored inside the CAR itself. It must be closed when done. Closing will +// finalize the CAR blockstore. +func ReadWriteFilestore(path string, roots ...cid.Cid) (ClosableBlockstore, error) { + rw, err := OpenReadWrite(path, roots, + carv2.ZeroLengthSectionAsEOF(true), + carv2.StoreIdentityCIDs(true), + blockstore.UseWholeCIDs(true), + ) + if err != nil { + return nil, err + } + + bs, err := FilestoreOf(rw) + if err != nil { + return nil, err + } + + return &closableBlockstore{Blockstore: bs, closeFn: rw.Finalize}, nil +} + +// FilestoreOf returns a FileManager/Filestore backed entirely by a +// blockstore without requiring a datastore. It achieves this by coercing the +// blockstore into a datastore. The resulting blockstore is suitable for usage +// with DagBuilderHelper with DagBuilderParams#NoCopy=true. +func FilestoreOf(bs bstore.Blockstore) (bstore.Blockstore, error) { + coercer := &dsCoercer{bs} + + // the FileManager stores positional infos (positional mappings) in a + // datastore, which in our case is the blockstore coerced into a datastore. + // + // Passing the root dir as a base path makes me uneasy, but these filestores + // are only used locally. + fm := filestore.NewFileManager(coercer, "/") + fm.AllowFiles = true + + // the Filestore sifts leaves (PosInfos) from intermediate nodes. It writes + // PosInfo leaves to the datastore (which in our case is the coerced + // blockstore), and the intermediate nodes to the blockstore proper (since + // they cannot be mapped to the file).
+ fstore := filestore.NewFilestore(bs, fm) + bs = bstore.NewIdStore(fstore) + + return bs, nil +} + +var cidBuilder = cid.V1Builder{Codec: cid.Raw, MhType: mh.SHA2_256} + +// dsCoercer coerces a Blockstore to present a datastore interface, apt for +// usage with the Filestore/FileManager. Only PosInfos will be written through +// this path. +type dsCoercer struct { + bstore.Blockstore +} + +var _ datastore.Batching = (*dsCoercer)(nil) + +func (crcr *dsCoercer) Get(ctx context.Context, key datastore.Key) (value []byte, err error) { + c, err := cidBuilder.Sum(key.Bytes()) + if err != nil { + return nil, xerrors.Errorf("failed to create cid: %w", err) + } + + blk, err := crcr.Blockstore.Get(ctx, c) + if err != nil { + return nil, xerrors.Errorf("failed to get cid %s: %w", c, err) + } + return blk.RawData(), nil +} + +func (crcr *dsCoercer) Put(ctx context.Context, key datastore.Key, value []byte) error { + c, err := cidBuilder.Sum(key.Bytes()) + if err != nil { + return xerrors.Errorf("failed to create cid: %w", err) + } + blk, err := blocks.NewBlockWithCid(value, c) + if err != nil { + return xerrors.Errorf("failed to create block: %w", err) + } + if err := crcr.Blockstore.Put(ctx, blk); err != nil { + return xerrors.Errorf("failed to put block: %w", err) + } + return nil +} + +func (crcr *dsCoercer) Has(ctx context.Context, key datastore.Key) (exists bool, err error) { + c, err := cidBuilder.Sum(key.Bytes()) + if err != nil { + return false, xerrors.Errorf("failed to create cid: %w", err) + } + return crcr.Blockstore.Has(ctx, c) +} + +func (crcr *dsCoercer) Batch(_ context.Context) (datastore.Batch, error) { + return datastore.NewBasicBatch(crcr), nil +} + +func (crcr *dsCoercer) GetSize(_ context.Context, _ datastore.Key) (size int, err error) { + return 0, xerrors.New("operation NOT supported: GetSize") +} + +func (crcr *dsCoercer) Query(_ context.Context, _ query.Query) (query.Results, error) { + return nil, xerrors.New("operation NOT supported: Query") +} + +func (crcr *dsCoercer) Delete(_ context.Context, _ datastore.Key) error { + return xerrors.New("operation NOT supported: Delete") +} + +func (crcr *dsCoercer) Sync(_ context.Context, _ datastore.Key) error { + return xerrors.New("operation NOT supported: Sync") +} + +func (crcr *dsCoercer) Close() error { + return nil +} + +type closableBlockstore struct { + bstore.Blockstore + closeFn func() error +} + +func (c *closableBlockstore) Close() error { + return c.closeFn() +} diff --git a/cmd/lib/stores/kvcarbs.go b/cmd/lib/stores/kvcarbs.go new file mode 100644 index 000000000..4f457d43a --- /dev/null +++ b/cmd/lib/stores/kvcarbs.go @@ -0,0 +1,1677 @@ +package stores + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "sync" + + blockstore "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/ipld/merkledag" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + format "github.com/ipfs/go-ipld-format" + "github.com/ipld/go-car/util" + carv2 "github.com/ipld/go-car/v2" + "github.com/ipld/go-car/v2/index" + "github.com/multiformats/go-multicodec" + "github.com/multiformats/go-multihash" + "github.com/multiformats/go-varint" + "github.com/petar/GoLLRB/llrb" + cborg "github.com/whyrusleeping/cbor/go" + "golang.org/x/exp/mmap" +) + +/* + + This file contains extracted parts of CARv2 blockstore, modified to allow + storage of arbitrary data indexed by ID CIDs. 
+ + This was allowed by go-car prior to v2.1.0, but newer go-car releases + require that data matches the multihash, which means that the library can + no longer be exploited as a KV store as is done in filestore.go. + + We duplicate the code here temporarily, as an alternative to breaking + existing nodes, or adding an option to go-car which would break the CAR spec + (it also contains this hack to a single repo). + + Ideally we should migrate to a real KV store, but even for that we'll still + need this code for the migration process. + +*/ + +// Modified vs go-car/v2 +func isIdentity(cid.Cid) (digest []byte, ok bool, err error) { + /* + dmh, err := multihash.Decode(key.Hash()) + if err != nil { + return nil, false, err + } + ok = dmh.Code == multihash.IDENTITY + digest = dmh.Digest + return digest, ok, nil + */ + + // This is the hack filestore datastore needs to use CARs as a KV store + return nil, false, err +} + +// Code below was copied from go-car/v2 + +var ( + _ io.ReaderAt = (*OffsetReadSeeker)(nil) + _ io.ReadSeeker = (*OffsetReadSeeker)(nil) +) + +// OffsetReadSeeker implements Read, and ReadAt on a section +// of an underlying io.ReaderAt. +// The main difference between io.SectionReader and OffsetReadSeeker is that +// NewOffsetReadSeeker does not require the user to know the number of readable bytes. +// +// It also partially implements Seek, where the implementation panics if io.SeekEnd is passed. +// This is because, OffsetReadSeeker does not know the end of the file therefore cannot seek relative +// to it. +type OffsetReadSeeker struct { + r io.ReaderAt + base int64 + off int64 +} + +// NewOffsetReadSeeker returns an OffsetReadSeeker that reads from r +// starting offset offset off and stops with io.EOF when r reaches its end. +// The Seek function will panic if whence io.SeekEnd is passed. +func NewOffsetReadSeeker(r io.ReaderAt, off int64) *OffsetReadSeeker { + return &OffsetReadSeeker{r, off, off} +} + +func (o *OffsetReadSeeker) Read(p []byte) (n int, err error) { + n, err = o.r.ReadAt(p, o.off) + o.off += int64(n) + return +} + +func (o *OffsetReadSeeker) ReadAt(p []byte, off int64) (n int, err error) { + if off < 0 { + return 0, io.EOF + } + off += o.base + return o.r.ReadAt(p, off) +} + +func (o *OffsetReadSeeker) ReadByte() (byte, error) { + b := []byte{0} + _, err := o.Read(b) + return b[0], err +} + +func (o *OffsetReadSeeker) Offset() int64 { + return o.off +} + +func (o *OffsetReadSeeker) Seek(offset int64, whence int) (int64, error) { + switch whence { + case io.SeekStart: + o.off = offset + o.base + case io.SeekCurrent: + o.off += offset + case io.SeekEnd: + panic("unsupported whence: SeekEnd") + } + return o.Position(), nil +} + +// Position returns the current position of this reader relative to the initial offset. 
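A rough sketch of the OffsetReadSeeker semantics described above (hypothetical helper, assumed to live in this package; the sample data is made up): reads start at the base offset, and Position reports progress relative to that base rather than to the underlying reader.

func offsetReadSeekerSketch() {
    data := []byte("headerpayload")
    ors := NewOffsetReadSeeker(bytes.NewReader(data), 6) // base offset skips "header"

    buf := make([]byte, 7)
    n, err := ors.Read(buf)              // reads "payload"
    fmt.Println(n, err, string(buf[:n])) // 7 <nil> payload
    fmt.Println(ors.Position())          // 7: bytes consumed past the base offset
    // Seeking with io.SeekEnd would panic, because the end of the backing reader is unknown.
}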
+func (o *OffsetReadSeeker) Position() int64 { + return o.off - o.base +} + +var ( + _ io.Writer = (*OffsetWriteSeeker)(nil) + _ io.WriteSeeker = (*OffsetWriteSeeker)(nil) +) + +type OffsetWriteSeeker struct { + w io.WriterAt + base int64 + offset int64 +} + +func NewOffsetWriter(w io.WriterAt, off int64) *OffsetWriteSeeker { + return &OffsetWriteSeeker{w, off, off} +} + +func (ow *OffsetWriteSeeker) Write(b []byte) (n int, err error) { + n, err = ow.w.WriteAt(b, ow.offset) + ow.offset += int64(n) + return +} + +func (ow *OffsetWriteSeeker) Seek(offset int64, whence int) (int64, error) { + switch whence { + case io.SeekStart: + ow.offset = offset + ow.base + case io.SeekCurrent: + ow.offset += offset + case io.SeekEnd: + panic("unsupported whence: SeekEnd") + } + return ow.Position(), nil +} + +// Position returns the current position of this writer relative to the initial offset, i.e. the number of bytes written. +func (ow *OffsetWriteSeeker) Position() int64 { + return ow.offset - ow.base +} + +type BytesReader interface { + io.Reader + io.ByteReader +} + +func ReadNode(r io.Reader, zeroLenAsEOF bool) (cid.Cid, []byte, error) { + data, err := LdRead(r, zeroLenAsEOF) + if err != nil { + return cid.Cid{}, nil, err + } + + n, c, err := cid.CidFromBytes(data) + if err != nil { + return cid.Cid{}, nil, err + } + + return c, data[n:], nil +} + +func LdWrite(w io.Writer, d ...[]byte) error { + var sum uint64 + for _, s := range d { + sum += uint64(len(s)) + } + + buf := make([]byte, 8) + n := varint.PutUvarint(buf, sum) + _, err := w.Write(buf[:n]) + if err != nil { + return err + } + + for _, s := range d { + _, err = w.Write(s) + if err != nil { + return err + } + } + + return nil +} + +func LdSize(d ...[]byte) uint64 { + var sum uint64 + for _, s := range d { + sum += uint64(len(s)) + } + s := varint.UvarintSize(sum) + return sum + uint64(s) +} + +func LdRead(r io.Reader, zeroLenAsEOF bool) ([]byte, error) { + l, err := varint.ReadUvarint(ToByteReader(r)) + if err != nil { + // If the length of bytes read is non-zero when the error is EOF then signal an unclean EOF. 
+ if l > 0 && err == io.EOF { + return nil, io.ErrUnexpectedEOF + } + return nil, err + } else if l == 0 && zeroLenAsEOF { + return nil, io.EOF + } + + buf := make([]byte, l) + if _, err := io.ReadFull(r, buf); err != nil { + return nil, err + } + + return buf, nil +} + +var ( + _ io.ByteReader = (*readerPlusByte)(nil) + _ io.ByteReader = (*readSeekerPlusByte)(nil) + _ io.ByteReader = (*discardingReadSeekerPlusByte)(nil) + _ io.ReadSeeker = (*discardingReadSeekerPlusByte)(nil) + _ io.ReaderAt = (*readSeekerAt)(nil) +) + +type ( + readerPlusByte struct { + io.Reader + + byteBuf [1]byte // escapes via io.Reader.Read; preallocate + } + + readSeekerPlusByte struct { + io.ReadSeeker + + byteBuf [1]byte // escapes via io.Reader.Read; preallocate + } + + discardingReadSeekerPlusByte struct { + io.Reader + offset int64 + + byteBuf [1]byte // escapes via io.Reader.Read; preallocate + } + + ByteReadSeeker interface { + io.ReadSeeker + io.ByteReader + } + + readSeekerAt struct { + rs io.ReadSeeker + mu sync.Mutex + } +) + +func ToByteReader(r io.Reader) io.ByteReader { + if br, ok := r.(io.ByteReader); ok { + return br + } + return &readerPlusByte{Reader: r} +} + +func ToByteReadSeeker(r io.Reader) ByteReadSeeker { + if brs, ok := r.(ByteReadSeeker); ok { + return brs + } + if rs, ok := r.(io.ReadSeeker); ok { + return &readSeekerPlusByte{ReadSeeker: rs} + } + return &discardingReadSeekerPlusByte{Reader: r} +} + +func ToReaderAt(rs io.ReadSeeker) io.ReaderAt { + if ra, ok := rs.(io.ReaderAt); ok { + return ra + } + return &readSeekerAt{rs: rs} +} + +func (rb *readerPlusByte) ReadByte() (byte, error) { + _, err := io.ReadFull(rb, rb.byteBuf[:]) + return rb.byteBuf[0], err +} + +func (rsb *readSeekerPlusByte) ReadByte() (byte, error) { + _, err := io.ReadFull(rsb, rsb.byteBuf[:]) + return rsb.byteBuf[0], err +} + +func (drsb *discardingReadSeekerPlusByte) ReadByte() (byte, error) { + _, err := io.ReadFull(drsb, drsb.byteBuf[:]) + return drsb.byteBuf[0], err +} + +func (drsb *discardingReadSeekerPlusByte) Read(p []byte) (read int, err error) { + read, err = drsb.Reader.Read(p) + drsb.offset += int64(read) + return +} + +func (drsb *discardingReadSeekerPlusByte) Seek(offset int64, whence int) (int64, error) { + switch whence { + case io.SeekStart: + n := offset - drsb.offset + if n < 0 { + panic("unsupported rewind via whence: io.SeekStart") + } + _, err := io.CopyN(ioutil.Discard, drsb, n) + return drsb.offset, err + case io.SeekCurrent: + _, err := io.CopyN(ioutil.Discard, drsb, offset) + return drsb.offset, err + default: + panic("unsupported whence: io.SeekEnd") + } +} + +func (rsa *readSeekerAt) ReadAt(p []byte, off int64) (n int, err error) { + rsa.mu.Lock() + defer rsa.mu.Unlock() + if _, err := rsa.rs.Seek(off, io.SeekStart); err != nil { + return 0, err + } + return rsa.rs.Read(p) +} + +func init() { + cbor.RegisterCborType(CarHeader{}) +} + +type Store interface { + Put(blocks.Block) error +} + +type ReadStore interface { + Get(cid.Cid) (blocks.Block, error) +} + +type CarHeader struct { + Roots []cid.Cid + Version uint64 +} + +type carWriter struct { + ds format.NodeGetter + w io.Writer +} + +func WriteCar(ctx context.Context, ds format.NodeGetter, roots []cid.Cid, w io.Writer) error { + h := &CarHeader{ + Roots: roots, + Version: 1, + } + + if err := WriteHeader(h, w); err != nil { + return fmt.Errorf("failed to write car header: %s", err) + } + + cw := &carWriter{ds: ds, w: w} + seen := cid.NewSet() + for _, r := range roots { + if err := merkledag.Walk(ctx, cw.enumGetLinks, r, seen.Visit); 
err != nil { + return err + } + } + return nil +} + +func ReadHeader(r io.Reader) (*CarHeader, error) { + hb, err := LdRead(r, false) + if err != nil { + return nil, err + } + + var ch CarHeader + if err := cbor.DecodeInto(hb, &ch); err != nil { + return nil, fmt.Errorf("invalid header: %v", err) + } + + return &ch, nil +} + +func WriteHeader(h *CarHeader, w io.Writer) error { + hb, err := cbor.DumpObject(h) + if err != nil { + return err + } + + return util.LdWrite(w, hb) +} + +func HeaderSize(h *CarHeader) (uint64, error) { + hb, err := cbor.DumpObject(h) + if err != nil { + return 0, err + } + + return util.LdSize(hb), nil +} + +func (cw *carWriter) enumGetLinks(ctx context.Context, c cid.Cid) ([]*format.Link, error) { + nd, err := cw.ds.Get(ctx, c) + if err != nil { + return nil, err + } + + if err := cw.writeNode(ctx, nd); err != nil { + return nil, err + } + + return nd.Links(), nil +} + +func (cw *carWriter) writeNode(ctx context.Context, nd format.Node) error { + return util.LdWrite(cw.w, nd.Cid().Bytes(), nd.RawData()) +} + +type CarReader struct { + r io.Reader + Header *CarHeader + zeroLenAsEOF bool +} + +func NewCarReaderWithZeroLengthSectionAsEOF(r io.Reader) (*CarReader, error) { + return newCarReader(r, true) +} + +func NewCarReader(r io.Reader) (*CarReader, error) { + return newCarReader(r, false) +} + +func newCarReader(r io.Reader, zeroLenAsEOF bool) (*CarReader, error) { + ch, err := ReadHeader(r) + if err != nil { + return nil, err + } + + if ch.Version != 1 { + return nil, fmt.Errorf("invalid car version: %d", ch.Version) + } + + if len(ch.Roots) == 0 { + return nil, fmt.Errorf("empty car, no roots") + } + + return &CarReader{ + r: r, + Header: ch, + zeroLenAsEOF: zeroLenAsEOF, + }, nil +} + +func (cr *CarReader) Next() (blocks.Block, error) { + c, data, err := ReadNode(cr.r, cr.zeroLenAsEOF) + if err != nil { + return nil, err + } + + hashed, err := c.Prefix().Sum(data) + if err != nil { + return nil, err + } + + if !hashed.Equals(c) { + return nil, fmt.Errorf("mismatch in content integrity, name: %s, data: %s", c, hashed) + } + + return blocks.NewBlockWithCid(data, c) +} + +type batchStore interface { + PutMany([]blocks.Block) error +} + +func LoadCar(s Store, r io.Reader) (*CarHeader, error) { + cr, err := NewCarReader(r) + if err != nil { + return nil, err + } + + if bs, ok := s.(batchStore); ok { + return loadCarFast(bs, cr) + } + + return loadCarSlow(s, cr) +} + +func loadCarFast(s batchStore, cr *CarReader) (*CarHeader, error) { + var buf []blocks.Block + for { + blk, err := cr.Next() + if err != nil { + if err == io.EOF { + if len(buf) > 0 { + if err := s.PutMany(buf); err != nil { + return nil, err + } + } + return cr.Header, nil + } + return nil, err + } + + buf = append(buf, blk) + + if len(buf) > 1000 { + if err := s.PutMany(buf); err != nil { + return nil, err + } + buf = buf[:0] + } + } +} + +func loadCarSlow(s Store, cr *CarReader) (*CarHeader, error) { + for { + blk, err := cr.Next() + if err != nil { + if err == io.EOF { + return cr.Header, nil + } + return nil, err + } + + if err := s.Put(blk); err != nil { + return nil, err + } + } +} + +// Matches checks whether two headers match. +// Two headers are considered matching if: +// 1. They have the same version number, and +// 2. They contain the same root CIDs in any order. +// +// Note, this function explicitly ignores the order of roots. +// If order of roots matter use reflect.DeepEqual instead. 
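To make the documented behaviour concrete, a small sketch (hypothetical function; the root CIDs are fabricated with the raw codec purely for illustration) showing that Matches ignores root ordering:

func headerMatchesSketch() bool {
    mk := func(s string) cid.Cid {
        c, _ := cid.V1Builder{Codec: cid.Raw, MhType: multihash.SHA2_256}.Sum([]byte(s))
        return c
    }
    a, b := mk("root-a"), mk("root-b")

    h1 := CarHeader{Roots: []cid.Cid{a, b}, Version: 1}
    h2 := CarHeader{Roots: []cid.Cid{b, a}, Version: 1}

    return h1.Matches(h2) // true: same version and same roots, order ignored
}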
+func (h CarHeader) Matches(other CarHeader) bool { + if h.Version != other.Version { + return false + } + thisLen := len(h.Roots) + if thisLen != len(other.Roots) { + return false + } + // Headers with a single root are popular. + // Implement a fast execution path for popular cases. + if thisLen == 1 { + return h.Roots[0].Equals(other.Roots[0]) + } + + // Check other contains all roots. + // TODO: should this be optimised for cases where the number of roots are large since it has O(N^2) complexity? + for _, r := range h.Roots { + if !other.containsRoot(r) { + return false + } + } + return true +} + +func (h *CarHeader) containsRoot(root cid.Cid) bool { + for _, r := range h.Roots { + if r.Equals(root) { + return true + } + } + return false +} + +var _ blockstore.Blockstore = (*ReadOnly)(nil) + +var ( + errZeroLengthSection = fmt.Errorf("zero-length carv2 section not allowed by default; see WithZeroLengthSectionAsEOF option") + errReadOnly = fmt.Errorf("called write method on a read-only carv2 blockstore") + errClosed = fmt.Errorf("cannot use a carv2 blockstore after closing") +) + +// ReadOnly provides a read-only CAR Block Store. +type ReadOnly struct { + // mu allows ReadWrite to be safe for concurrent use. + // It's in ReadOnly so that read operations also grab read locks, + // given that ReadWrite embeds ReadOnly for methods like Get and Has. + // + // The main fields guarded by the mutex are the index and the underlying writers. + // For simplicity, the entirety of the blockstore methods grab the mutex. + mu sync.RWMutex + + // When true, the blockstore has been closed via Close, Discard, or + // Finalize, and must not be used. Any further blockstore method calls + // will return errClosed to avoid panics or broken behavior. + closed bool + + // The backing containing the data payload in CARv1 format. + backing io.ReaderAt + // The CARv1 content index. + idx index.Index + + // If we called carv2.NewReaderMmap, remember to close it too. + carv2Closer io.Closer + + opts carv2.Options +} + +type contextKey string + +const asyncErrHandlerKey contextKey = "asyncErrorHandlerKey" + +// UseWholeCIDs is a read option which makes a CAR blockstore identify blocks by +// whole CIDs, and not just their multihashes. The default is to use +// multihashes, which matches the current semantics of go-ipfs-blockstore v1. +// +// Enabling this option affects a number of methods, including read-only ones: +// +// • Get, Has, and HasSize will only return a block +// only if the entire CID is present in the CAR file. +// +// • AllKeysChan will return the original whole CIDs, instead of with their +// multicodec set to "raw" to just provide multihashes. +// +// • If AllowDuplicatePuts isn't set, +// Put and PutMany will deduplicate by the whole CID, +// allowing different CIDs with equal multihashes. +// +// Note that this option only affects the blockstore, and is ignored by the root +// go-car/v2 package. +func UseWholeCIDs(enable bool) carv2.Option { + return func(o *carv2.Options) { + o.BlockstoreUseWholeCIDs = enable + } +} + +// NewReadOnly creates a new ReadOnly blockstore from the backing with a optional index as idx. +// This function accepts both CARv1 and CARv2 backing. +// The blockstore is instantiated with the given index if it is not nil. +// +// Otherwise: +// * For a CARv1 backing an index is generated. +// * For a CARv2 backing an index is only generated if Header.HasIndex returns false. +// +// There is no need to call ReadOnly.Close on instances returned by this function. 
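As a usage sketch (hypothetical caller; path and key are placeholders): blockstores opened via OpenReadOnly, defined further below, own the underlying mmap and should be closed by the caller, in contrast with NewReadOnly over a caller-provided reader.

func openReadOnlySketch(ctx context.Context, path string, key cid.Cid) error {
    // Identify blocks by whole CID rather than just multihash.
    bs, err := OpenReadOnly(path, UseWholeCIDs(true))
    if err != nil {
        return err
    }
    defer bs.Close() // releases the mmap opened by OpenReadOnly

    blk, err := bs.Get(ctx, key)
    if err != nil {
        return err
    }
    _ = blk.RawData()
    return nil
}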
+func NewReadOnly(backing io.ReaderAt, idx index.Index, opts ...carv2.Option) (*ReadOnly, error) { + b := &ReadOnly{ + opts: carv2.ApplyOptions(opts...), + } + + version, err := readVersion(backing) + if err != nil { + return nil, err + } + switch version { + case 1: + if idx == nil { + if idx, err = generateIndex(backing, opts...); err != nil { + return nil, err + } + } + b.backing = backing + b.idx = idx + return b, nil + case 2: + v2r, err := carv2.NewReader(backing, opts...) + if err != nil { + return nil, err + } + if idx == nil { + if v2r.Header.HasIndex() { + r, err := v2r.IndexReader() + if err != nil { + return nil, err + } + idx, err = index.ReadFrom(r) + if err != nil { + return nil, err + } + } else { + r, err := v2r.DataReader() + if err != nil { + return nil, err + } + idx, err = generateIndex(r, opts...) + if err != nil { + return nil, err + } + } + } + drBacking, err := v2r.DataReader() + if err != nil { + return nil, err + } + b.backing = drBacking + b.idx = idx + return b, nil + default: + return nil, fmt.Errorf("unsupported car version: %v", version) + } +} + +func readVersion(at io.ReaderAt) (uint64, error) { + var rr io.Reader + switch r := at.(type) { + case io.Reader: + rr = r + default: + rr = NewOffsetReadSeeker(r, 0) + } + return carv2.ReadVersion(rr) +} + +func generateIndex(at io.ReaderAt, opts ...carv2.Option) (index.Index, error) { + var rs io.ReadSeeker + switch r := at.(type) { + case io.ReadSeeker: + rs = r + default: + rs = NewOffsetReadSeeker(r, 0) + } + + // Note, we do not set any write options so that all write options fall back onto defaults. + return carv2.GenerateIndex(rs, opts...) +} + +// OpenReadOnly opens a read-only blockstore from a CAR file (either v1 or v2), generating an index if it does not exist. +// Note, the generated index if the index does not exist is ephemeral and only stored in memory. +// See car.GenerateIndex and Index.Attach for persisting index onto a CAR file. +func OpenReadOnly(path string, opts ...carv2.Option) (*ReadOnly, error) { + f, err := mmap.Open(path) + if err != nil { + return nil, err + } + + robs, err := NewReadOnly(f, nil, opts...) + if err != nil { + return nil, err + } + robs.carv2Closer = f + + return robs, nil +} + +func (b *ReadOnly) readBlock(idx int64) (cid.Cid, []byte, error) { + bcid, data, err := ReadNode(NewOffsetReadSeeker(b.backing, idx), b.opts.ZeroLengthSectionAsEOF) + return bcid, data, err +} + +// DeleteBlock is unsupported and always errors. +func (b *ReadOnly) DeleteBlock(_ context.Context, _ cid.Cid) error { + return errReadOnly +} + +// Has indicates if the store contains a block that corresponds to the given key. +// This function always returns true for any given key with multihash.IDENTITY code. +func (b *ReadOnly) Has(ctx context.Context, key cid.Cid) (bool, error) { + // Check if the given CID has multihash.IDENTITY code + // Note, we do this without locking, since there is no shared information to lock for in order to perform the check. 
+ if _, ok, err := isIdentity(key); err != nil { + return false, err + } else if ok { + return true, nil + } + + b.mu.RLock() + defer b.mu.RUnlock() + + if b.closed { + return false, errClosed + } + + var fnFound bool + var fnErr error + err := b.idx.GetAll(key, func(offset uint64) bool { + uar := NewOffsetReadSeeker(b.backing, int64(offset)) + var err error + _, err = varint.ReadUvarint(uar) + if err != nil { + fnErr = err + return false + } + _, readCid, err := cid.CidFromReader(uar) + if err != nil { + fnErr = err + return false + } + if b.opts.BlockstoreUseWholeCIDs { + fnFound = readCid.Equals(key) + return !fnFound // continue looking if we haven't found it + } else { + fnFound = bytes.Equal(readCid.Hash(), key.Hash()) + return false + } + }) + if errors.Is(err, index.ErrNotFound) { + return false, nil + } else if err != nil { + return false, err + } + return fnFound, fnErr +} + +// Get gets a block corresponding to the given key. +// This API will always return true if the given key has multihash.IDENTITY code. +func (b *ReadOnly) Get(ctx context.Context, key cid.Cid) (blocks.Block, error) { + // Check if the given CID has multihash.IDENTITY code + // Note, we do this without locking, since there is no shared information to lock for in order to perform the check. + if digest, ok, err := isIdentity(key); err != nil { + return nil, err + } else if ok { + return blocks.NewBlockWithCid(digest, key) + } + + b.mu.RLock() + defer b.mu.RUnlock() + + if b.closed { + return nil, errClosed + } + + var fnData []byte + var fnErr error + err := b.idx.GetAll(key, func(offset uint64) bool { + readCid, data, err := b.readBlock(int64(offset)) + if err != nil { + fnErr = err + return false + } + if b.opts.BlockstoreUseWholeCIDs { + if readCid.Equals(key) { + fnData = data + return false + } else { + return true // continue looking + } + } else { + if bytes.Equal(readCid.Hash(), key.Hash()) { + fnData = data + } + return false + } + }) + if errors.Is(err, index.ErrNotFound) { + return nil, format.ErrNotFound{Cid: key} + } else if err != nil { + return nil, err + } else if fnErr != nil { + return nil, fnErr + } + if fnData == nil { + return nil, format.ErrNotFound{Cid: key} + } + return blocks.NewBlockWithCid(fnData, key) +} + +// GetSize gets the size of an item corresponding to the given key. +func (b *ReadOnly) GetSize(ctx context.Context, key cid.Cid) (int, error) { + // Check if the given CID has multihash.IDENTITY code + // Note, we do this without locking, since there is no shared information to lock for in order to perform the check. 
+ if digest, ok, err := isIdentity(key); err != nil { + return 0, err + } else if ok { + return len(digest), nil + } + + b.mu.RLock() + defer b.mu.RUnlock() + + if b.closed { + return 0, errClosed + } + + fnSize := -1 + var fnErr error + err := b.idx.GetAll(key, func(offset uint64) bool { + rdr := NewOffsetReadSeeker(b.backing, int64(offset)) + sectionLen, err := varint.ReadUvarint(rdr) + if err != nil { + fnErr = err + return false + } + cidLen, readCid, err := cid.CidFromReader(rdr) + if err != nil { + fnErr = err + return false + } + if b.opts.BlockstoreUseWholeCIDs { + if readCid.Equals(key) { + fnSize = int(sectionLen) - cidLen + return false + } else { + return true // continue looking + } + } else { + if bytes.Equal(readCid.Hash(), key.Hash()) { + fnSize = int(sectionLen) - cidLen + } + return false + } + }) + if errors.Is(err, index.ErrNotFound) { + return -1, format.ErrNotFound{Cid: key} + } else if err != nil { + return -1, err + } else if fnErr != nil { + return -1, fnErr + } + if fnSize == -1 { + return -1, format.ErrNotFound{Cid: key} + } + return fnSize, nil +} + +// Put is not supported and always returns an error. +func (b *ReadOnly) Put(context.Context, blocks.Block) error { + return errReadOnly +} + +// PutMany is not supported and always returns an error. +func (b *ReadOnly) PutMany(context.Context, []blocks.Block) error { + return errReadOnly +} + +// WithAsyncErrorHandler returns a context with async error handling set to the given errHandler. +// Any errors that occur during asynchronous operations of AllKeysChan will be passed to the given +// handler. +func WithAsyncErrorHandler(ctx context.Context, errHandler func(error)) context.Context { + return context.WithValue(ctx, asyncErrHandlerKey, errHandler) +} + +// AllKeysChan returns the list of keys in the CAR data payload. +// If the ctx is constructed using WithAsyncErrorHandler any errors that occur during asynchronous +// retrieval of CIDs will be passed to the error handler function set in context. +// Otherwise, errors will terminate the asynchronous operation silently. +// +// See WithAsyncErrorHandler +func (b *ReadOnly) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + // We release the lock when the channel-sending goroutine stops. + // Note that we can't use a deferred unlock here, + // because if we return a nil error, + // we only want to unlock once the async goroutine has stopped. + b.mu.RLock() + + if b.closed { + b.mu.RUnlock() // don't hold the mutex forever + return nil, errClosed + } + + // TODO we may use this walk for populating the index, and we need to be able to iterate keys in this way somewhere for index generation. In general though, when it's asked for all keys from a blockstore with an index, we should iterate through the index when possible rather than linear reads through the full car. + rdr := NewOffsetReadSeeker(b.backing, 0) + header, err := ReadHeader(rdr) + if err != nil { + b.mu.RUnlock() // don't hold the mutex forever + return nil, fmt.Errorf("error reading car header: %w", err) + } + headerSize, err := HeaderSize(header) + if err != nil { + b.mu.RUnlock() // don't hold the mutex forever + return nil, err + } + + // TODO: document this choice of 5, or use simpler buffering like 0 or 1. + ch := make(chan cid.Cid, 5) + + // Seek to the end of header. 
+ if _, err = rdr.Seek(int64(headerSize), io.SeekStart); err != nil { + b.mu.RUnlock() // don't hold the mutex forever + return nil, err + } + + go func() { + defer b.mu.RUnlock() + defer close(ch) + + for { + length, err := varint.ReadUvarint(rdr) + if err != nil { + if err != io.EOF { + maybeReportError(ctx, err) + } + return + } + + // Null padding; by default it's an error. + if length == 0 { + if b.opts.ZeroLengthSectionAsEOF { + break + } else { + maybeReportError(ctx, errZeroLengthSection) + return + } + } + + thisItemForNxt := rdr.Offset() + _, c, err := cid.CidFromReader(rdr) + if err != nil { + maybeReportError(ctx, err) + return + } + if _, err := rdr.Seek(thisItemForNxt+int64(length), io.SeekStart); err != nil { + maybeReportError(ctx, err) + return + } + + // If we're just using multihashes, flatten to the "raw" codec. + if !b.opts.BlockstoreUseWholeCIDs { + c = cid.NewCidV1(cid.Raw, c.Hash()) + } + + select { + case ch <- c: + case <-ctx.Done(): + maybeReportError(ctx, ctx.Err()) + return + } + } + }() + return ch, nil +} + +// maybeReportError checks if an error handler is present in context associated to the key +// asyncErrHandlerKey, and if preset it will pass the error to it. +func maybeReportError(ctx context.Context, err error) { + value := ctx.Value(asyncErrHandlerKey) + if eh, _ := value.(func(error)); eh != nil { + eh(err) + } +} + +// HashOnRead is currently unimplemented; hashing on reads never happens. +func (b *ReadOnly) HashOnRead(bool) { + // TODO: implement before the final release? +} + +// Roots returns the root CIDs of the backing CAR. +func (b *ReadOnly) Roots() ([]cid.Cid, error) { + header, err := ReadHeader(NewOffsetReadSeeker(b.backing, 0)) + if err != nil { + return nil, fmt.Errorf("error reading car header: %w", err) + } + return header.Roots, nil +} + +// Close closes the underlying reader if it was opened by OpenReadOnly. +// After this call, the blockstore can no longer be used. +// +// Note that this call may block if any blockstore operations are currently in +// progress, including an AllKeysChan that hasn't been fully consumed or cancelled. 
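A short sketch of the async error handling described above (hypothetical function, assumed to sit in this package). The channel must be drained, or the context cancelled, so that the read lock taken by AllKeysChan is released.

func listKeysSketch(ctx context.Context, bs *ReadOnly) error {
    // Route asynchronous iteration errors to a handler instead of dropping them.
    ctx = WithAsyncErrorHandler(ctx, func(err error) {
        fmt.Println("AllKeysChan error:", err)
    })

    ch, err := bs.AllKeysChan(ctx)
    if err != nil {
        return err
    }
    for c := range ch {
        fmt.Println(c)
    }
    return nil
}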
+func (b *ReadOnly) Close() error { + b.mu.Lock() + defer b.mu.Unlock() + + return b.closeWithoutMutex() +} + +func (b *ReadOnly) closeWithoutMutex() error { + b.closed = true + if b.carv2Closer != nil { + return b.carv2Closer.Close() + } + return nil +} + +var ( + errUnsupported = errors.New("not supported") + insertionIndexCodec = multicodec.Code(0x300003) +) + +type ( + insertionIndex struct { + items llrb.LLRB + } + + recordDigest struct { + digest []byte + index.Record + } +) + +func (r recordDigest) Less(than llrb.Item) bool { + other, ok := than.(recordDigest) + if !ok { + return false + } + return bytes.Compare(r.digest, other.digest) < 0 +} + +func newRecordDigest(r index.Record) recordDigest { + d, err := multihash.Decode(r.Hash()) + if err != nil { + panic(err) + } + + return recordDigest{d.Digest, r} +} + +func newRecordFromCid(c cid.Cid, at uint64) recordDigest { + d, err := multihash.Decode(c.Hash()) + if err != nil { + panic(err) + } + + return recordDigest{d.Digest, index.Record{Cid: c, Offset: at}} +} + +func (ii *insertionIndex) insertNoReplace(key cid.Cid, n uint64) { + ii.items.InsertNoReplace(newRecordFromCid(key, n)) +} + +func (ii *insertionIndex) Get(c cid.Cid) (uint64, error) { + d, err := multihash.Decode(c.Hash()) + if err != nil { + return 0, err + } + entry := recordDigest{digest: d.Digest} + e := ii.items.Get(entry) + if e == nil { + return 0, index.ErrNotFound + } + r, ok := e.(recordDigest) + if !ok { + return 0, errUnsupported + } + + return r.Record.Offset, nil +} + +func (ii *insertionIndex) GetAll(c cid.Cid, fn func(uint64) bool) error { + d, err := multihash.Decode(c.Hash()) + if err != nil { + return err + } + entry := recordDigest{digest: d.Digest} + + any := false + iter := func(i llrb.Item) bool { + existing := i.(recordDigest) + if !bytes.Equal(existing.digest, entry.digest) { + // We've already looked at all entries with matching digests. + return false + } + any = true + return fn(existing.Record.Offset) + } + ii.items.AscendGreaterOrEqual(entry, iter) + if !any { + return index.ErrNotFound + } + return nil +} + +func (ii *insertionIndex) Marshal(w io.Writer) (uint64, error) { + l := uint64(0) + if err := binary.Write(w, binary.LittleEndian, int64(ii.items.Len())); err != nil { + return l, err + } + + l += 8 + var err error + iter := func(i llrb.Item) bool { + if err = cborg.Encode(w, i.(recordDigest).Record); err != nil { + return false + } + return true + } + ii.items.AscendGreaterOrEqual(ii.items.Min(), iter) + return l, err +} + +func (ii *insertionIndex) Unmarshal(r io.Reader) error { + var length int64 + if err := binary.Read(r, binary.LittleEndian, &length); err != nil { + return err + } + d := cborg.NewDecoder(r) + for i := int64(0); i < length; i++ { + var rec index.Record + if err := d.Decode(&rec); err != nil { + return err + } + ii.items.InsertNoReplace(newRecordDigest(rec)) + } + return nil +} + +func (ii *insertionIndex) Codec() multicodec.Code { + return insertionIndexCodec +} + +func (ii *insertionIndex) Load(rs []index.Record) error { + for _, r := range rs { + rec := newRecordDigest(r) + if rec.digest == nil { + return fmt.Errorf("invalid entry: %v", r) + } + ii.items.InsertNoReplace(rec) + } + return nil +} + +func newInsertionIndex() *insertionIndex { + return &insertionIndex{} +} + +// flatten returns a formatted index in the given codec for more efficient subsequent loading. 
+func (ii *insertionIndex) flatten(codec multicodec.Code) (index.Index, error) { + si, err := index.New(codec) + if err != nil { + return nil, err + } + rcrds := make([]index.Record, ii.items.Len()) + + idx := 0 + iter := func(i llrb.Item) bool { + rcrds[idx] = i.(recordDigest).Record + idx++ + return true + } + ii.items.AscendGreaterOrEqual(ii.items.Min(), iter) + + if err := si.Load(rcrds); err != nil { + return nil, err + } + return si, nil +} + +// note that hasExactCID is very similar to GetAll, +// but it's separate as it allows us to compare Record.Cid directly, +// whereas GetAll just provides Record.Offset. + +func (ii *insertionIndex) hasExactCID(c cid.Cid) bool { + d, err := multihash.Decode(c.Hash()) + if err != nil { + panic(err) + } + entry := recordDigest{digest: d.Digest} + + found := false + iter := func(i llrb.Item) bool { + existing := i.(recordDigest) + if !bytes.Equal(existing.digest, entry.digest) { + // We've already looked at all entries with matching digests. + return false + } + if existing.Record.Cid == c { + // We found an exact match. + found = true + return false + } + // Continue looking in ascending order. + return true + } + ii.items.AscendGreaterOrEqual(entry, iter) + return found +} + +var _ blockstore.Blockstore = (*ReadWrite)(nil) + +// ReadWrite implements a blockstore that stores blocks in CARv2 format. +// Blocks put into the blockstore can be read back once they are successfully written. +// This implementation is preferable for a write-heavy workload. +// The blocks are written immediately on Put and PutAll calls, while the index is stored in memory +// and updated incrementally. +// +// The Finalize function must be called once the putting blocks are finished. +// Upon calling Finalize header is finalized and index is written out. +// Once finalized, all read and write calls to this blockstore will result in errors. +type ReadWrite struct { + ronly ReadOnly + + f *os.File + dataWriter *OffsetWriteSeeker + idx *insertionIndex + header carv2.Header + + opts carv2.Options +} + +// AllowDuplicatePuts is a write option which makes a CAR blockstore not +// deduplicate blocks in Put and PutMany. The default is to deduplicate, +// which matches the current semantics of go-ipfs-blockstore v1. +// +// Note that this option only affects the blockstore, and is ignored by the root +// go-car/v2 package. +func AllowDuplicatePuts(allow bool) carv2.Option { + return func(o *carv2.Options) { + o.BlockstoreAllowDuplicatePuts = allow + } +} + +// OpenReadWrite creates a new ReadWrite at the given path with a provided set of root CIDs and options. +// +// ReadWrite.Finalize must be called once putting and reading blocks are no longer needed. +// Upon calling ReadWrite.Finalize the CARv2 header and index are written out onto the file and the +// backing file is closed. Once finalized, all read and write calls to this blockstore will result +// in errors. Note, ReadWrite.Finalize must be called on an open instance regardless of whether any +// blocks were put or not. +// +// If a file at given path does not exist, the instantiation will write car.Pragma and data payload +// header (i.e. the inner CARv1 header) onto the file before returning. +// +// When the given path already exists, the blockstore will attempt to resume from it. +// On resumption the existing data sections in file are re-indexed, allowing the caller to continue +// putting any remaining blocks without having to re-ingest blocks for which previous ReadWrite.Put +// returned successfully. 
+// +// Resumption only works on files that were created by a previous instance of a ReadWrite +// blockstore. This means a file created as a result of a successful call to OpenReadWrite can be +// resumed from as long as write operations such as ReadWrite.Put, ReadWrite.PutMany returned +// successfully. On resumption the roots argument and WithDataPadding option must match the +// previous instantiation of ReadWrite blockstore that created the file. More explicitly, the file +// resuming from must: +// 1. start with a complete CARv2 car.Pragma. +// 2. contain a complete CARv1 data header with root CIDs matching the CIDs passed to the +// constructor, starting at offset optionally padded by WithDataPadding, followed by zero or +// more complete data sections. If any corrupt data sections are present the resumption will fail. +// Note, if set previously, the blockstore must use the same WithDataPadding option as before, +// since this option is used to locate the CARv1 data payload. +// +// Note, resumption should be used with WithCidDeduplication, so that blocks that are successfully +// written into the file are not re-written. Unless, the user explicitly wants duplicate blocks. +// +// Resuming from finalized files is allowed. However, resumption will regenerate the index +// regardless by scanning every existing block in file. +func OpenReadWrite(path string, roots []cid.Cid, opts ...carv2.Option) (*ReadWrite, error) { + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0o666) // TODO: Should the user be able to configure FileMode permissions? + if err != nil { + return nil, fmt.Errorf("could not open read/write file: %w", err) + } + stat, err := f.Stat() + if err != nil { + // Note, we should not get a an os.ErrNotExist here because the flags used to open file includes os.O_CREATE + return nil, err + } + // Try and resume by default if the file size is non-zero. + resume := stat.Size() != 0 + // If construction of blockstore fails, make sure to close off the open file. + defer func() { + if err != nil { + f.Close() + } + }() + + // Instantiate block store. + // Set the header fileld before applying options since padding options may modify header. + rwbs := &ReadWrite{ + f: f, + idx: newInsertionIndex(), + header: carv2.NewHeader(0), + opts: carv2.ApplyOptions(opts...), + } + rwbs.ronly.opts = rwbs.opts + + if p := rwbs.opts.DataPadding; p > 0 { + rwbs.header = rwbs.header.WithDataPadding(p) + } + if p := rwbs.opts.IndexPadding; p > 0 { + rwbs.header = rwbs.header.WithIndexPadding(p) + } + + rwbs.dataWriter = NewOffsetWriter(rwbs.f, int64(rwbs.header.DataOffset)) + v1r := NewOffsetReadSeeker(rwbs.f, int64(rwbs.header.DataOffset)) + rwbs.ronly.backing = v1r + rwbs.ronly.idx = rwbs.idx + rwbs.ronly.carv2Closer = rwbs.f + + if resume { + if err = rwbs.resumeWithRoots(roots); err != nil { + return nil, err + } + } else { + if err = rwbs.initWithRoots(roots); err != nil { + return nil, err + } + } + + return rwbs, nil +} + +func (b *ReadWrite) initWithRoots(roots []cid.Cid) error { + if _, err := b.f.WriteAt(carv2.Pragma, 0); err != nil { + return err + } + return WriteHeader(&CarHeader{Roots: roots, Version: 1}, b.dataWriter) +} + +func (b *ReadWrite) resumeWithRoots(roots []cid.Cid) error { + // On resumption it is expected that the CARv2 Pragma, and the CARv1 header is successfully written. + // Otherwise we cannot resume from the file. + // Read pragma to assert if b.f is indeed a CARv2. 
+ version, err := carv2.ReadVersion(b.f) + if err != nil { + // The file is not a valid CAR file and cannot resume from it. + // Or the write must have failed before pragma was written. + return err + } + if version != 2 { + // The file is not a CARv2 and we cannot resume from it. + return fmt.Errorf("cannot resume on CAR file with version %v", version) + } + + // Check if file was finalized by trying to read the CARv2 header. + // We check because if finalized the CARv1 reader behaviour needs to be adjusted since + // EOF will not signify end of CARv1 payload. i.e. index is most likely present. + var headerInFile carv2.Header + _, err = headerInFile.ReadFrom(NewOffsetReadSeeker(b.f, carv2.PragmaSize)) + + // If reading CARv2 header succeeded, and CARv1 offset in header is not zero then the file is + // most-likely finalized. Check padding and truncate the file to remove index. + // Otherwise, carry on reading the v1 payload at offset determined from b.header. + if err == nil && headerInFile.DataOffset != 0 { + if headerInFile.DataOffset != b.header.DataOffset { + // Assert that the padding on file matches the given WithDataPadding option. + wantPadding := headerInFile.DataOffset - carv2.PragmaSize - carv2.HeaderSize + gotPadding := b.header.DataOffset - carv2.PragmaSize - carv2.HeaderSize + return fmt.Errorf( + "cannot resume from file with mismatched CARv1 offset; "+ + "`WithDataPadding` option must match the padding on file. "+ + "Expected padding value of %v but got %v", wantPadding, gotPadding, + ) + } else if headerInFile.DataSize == 0 { + // If CARv1 size is zero, since CARv1 offset wasn't, then the CARv2 header was + // most-likely partially written. Since we write the header last in Finalize then the + // file most-likely contains the index and we cannot know where it starts, therefore + // can't resume. + return errors.New("corrupt CARv2 header; cannot resume from file") + } + } + + // Use the given CARv1 padding to instantiate the CARv1 reader on file. + v1r := NewOffsetReadSeeker(b.ronly.backing, 0) + header, err := ReadHeader(v1r) + if err != nil { + // Cannot read the CARv1 header; the file is most likely corrupt. + return fmt.Errorf("error reading car header: %w", err) + } + if !header.Matches(CarHeader{Roots: roots, Version: 1}) { + // Cannot resume if version and root does not match. + return errors.New("cannot resume on file with mismatching data header") + } + + if headerInFile.DataOffset != 0 { + // If header in file contains the size of car v1, then the index is most likely present. + // Since we will need to re-generate the index, as the one in file is flattened, truncate + // the file so that the Readonly.backing has the right set of bytes to deal with. + // This effectively means resuming from a finalized file will wipe its index even if there + // are no blocks put unless the user calls finalize. + if err := b.f.Truncate(int64(headerInFile.DataOffset + headerInFile.DataSize)); err != nil { + return err + } + } + // Now that CARv2 header is present on file, clear it to avoid incorrect size and offset in + // header in case blocksotre is closed without finalization and is resumed from. + if err := b.unfinalize(); err != nil { + return fmt.Errorf("could not un-finalize: %w", err) + } + + // TODO See how we can reduce duplicate code here. + // The code here comes from car.GenerateIndex. + // Copied because we need to populate an insertindex, not a sorted index. + // Producing a sorted index via generate, then converting it to insertindex is not possible. 
+	// because the Index interface does not expose internal records.
+	// This may be done as part of https://github.com/ipld/go-car/issues/95
+
+	offset, err := HeaderSize(header)
+	if err != nil {
+		return err
+	}
+	sectionOffset := int64(0)
+	if sectionOffset, err = v1r.Seek(int64(offset), io.SeekStart); err != nil {
+		return err
+	}
+
+	for {
+		// Grab the length of the section.
+		// Note that ReadUvarint wants a ByteReader.
+		length, err := varint.ReadUvarint(v1r)
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+
+		// Null padding; by default it's an error.
+		if length == 0 {
+			if b.ronly.opts.ZeroLengthSectionAsEOF {
+				break
+			} else {
+				return fmt.Errorf("carv1 null padding not allowed by default; see WithZeroLengthSectionAsEOF")
+			}
+		}
+
+		// Grab the CID.
+		n, c, err := cid.CidFromReader(v1r)
+		if err != nil {
+			return err
+		}
+		b.idx.insertNoReplace(c, uint64(sectionOffset))
+
+		// Seek to the next section by skipping the block.
+		// The section length includes the CID, so subtract it.
+		if sectionOffset, err = v1r.Seek(int64(length)-int64(n), io.SeekCurrent); err != nil {
+			return err
+		}
+	}
+	// Seek to the end of the last skipped block, where the writer should resume writing.
+	_, err = b.dataWriter.Seek(sectionOffset, io.SeekStart)
+	return err
+}
+
+func (b *ReadWrite) unfinalize() error {
+	_, err := new(carv2.Header).WriteTo(NewOffsetWriter(b.f, carv2.PragmaSize))
+	return err
+}
+
+// Put puts a given block to the underlying datastore
+func (b *ReadWrite) Put(ctx context.Context, blk blocks.Block) error {
+	// PutMany already checks b.ronly.closed.
+	return b.PutMany(ctx, []blocks.Block{blk})
+}
+
+// PutMany puts a slice of blocks at the same time using batching
+// capabilities of the underlying datastore whenever possible.
+func (b *ReadWrite) PutMany(ctx context.Context, blks []blocks.Block) error {
+	b.ronly.mu.Lock()
+	defer b.ronly.mu.Unlock()
+
+	if b.ronly.closed {
+		return errClosed
+	}
+
+	for _, bl := range blks {
+		c := bl.Cid()
+
+		// If the StoreIdentityCIDs option is disabled then treat IDENTITY CIDs like IdStore.
+		if !b.opts.StoreIdentityCIDs {
+			// Check for IDENTITY CID. If IDENTITY, ignore and move to the next block.
+			if _, ok, err := isIdentity(c); err != nil {
+				return err
+			} else if ok {
+				continue
+			}
+		}
+
+		// Check if the CID size is too big.
+		// If it is larger than the maximum allowed size, return an error.
+		// Note, we need to check this regardless of whether we have an IDENTITY CID or not,
+		// since multihash codes other than IDENTITY can result in large digests.
+		cSize := uint64(len(c.Bytes()))
+		if cSize > b.opts.MaxIndexCidSize {
+			return &carv2.ErrCidTooLarge{MaxSize: b.opts.MaxIndexCidSize, CurrentSize: cSize}
+		}
+
+		if !b.opts.BlockstoreAllowDuplicatePuts {
+			if b.ronly.opts.BlockstoreUseWholeCIDs && b.idx.hasExactCID(c) {
+				continue // deduplicated by CID
+			}
+			if !b.ronly.opts.BlockstoreUseWholeCIDs {
+				_, err := b.idx.Get(c)
+				if err == nil {
+					continue // deduplicated by hash
+				}
+			}
+		}
+
+		n := uint64(b.dataWriter.Position())
+		if err := util.LdWrite(b.dataWriter, c.Bytes(), bl.RawData()); err != nil {
+			return err
+		}
+		b.idx.insertNoReplace(c, n)
+	}
+	return nil
+}
+
+// Discard closes this blockstore without finalizing its header and index.
+// After this call, the blockstore can no longer be used.
+//
+// Note that this call may block if any blockstore operations are currently in
+// progress, including an AllKeysChan that hasn't been fully consumed or cancelled.
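+//
+// Illustrative usage, a minimal sketch only (doWrites is a hypothetical helper supplied by the
+// caller; error handling is elided):
+//
+//	bs, _ := OpenReadWrite(path, roots)
+//	if writeErr := doWrites(bs); writeErr != nil {
+//		bs.Discard() // abandon the file without writing the CARv2 header or index
+//	} else {
+//		_ = bs.Finalize()
+//	}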
+func (b *ReadWrite) Discard() { + // Same semantics as ReadOnly.Close, including allowing duplicate calls. + // The only difference is that our method is called Discard, + // to further clarify that we're not properly finalizing and writing a + // CARv2 file. + b.ronly.Close() +} + +// Finalize finalizes this blockstore by writing the CARv2 header, along with flattened index +// for more efficient subsequent read. +// After this call, the blockstore can no longer be used. +func (b *ReadWrite) Finalize() error { + b.ronly.mu.Lock() + defer b.ronly.mu.Unlock() + + if b.ronly.closed { + // Allow duplicate Finalize calls, just like Close. + // Still error, just like ReadOnly.Close; it should be discarded. + return fmt.Errorf("called Finalize on a closed blockstore") + } + + // TODO check if add index option is set and don't write the index then set index offset to zero. + b.header = b.header.WithDataSize(uint64(b.dataWriter.Position())) + b.header.Characteristics.SetFullyIndexed(b.opts.StoreIdentityCIDs) + + // Note that we can't use b.Close here, as that tries to grab the same + // mutex we're holding here. + defer b.ronly.closeWithoutMutex() + + // TODO if index not needed don't bother flattening it. + fi, err := b.idx.flatten(b.opts.IndexCodec) + if err != nil { + return err + } + if _, err := index.WriteTo(fi, NewOffsetWriter(b.f, int64(b.header.IndexOffset))); err != nil { + return err + } + if _, err := b.header.WriteTo(NewOffsetWriter(b.f, carv2.PragmaSize)); err != nil { + return err + } + + if err := b.ronly.closeWithoutMutex(); err != nil { + return err + } + return nil +} + +func (b *ReadWrite) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + return b.ronly.AllKeysChan(ctx) +} + +func (b *ReadWrite) Has(ctx context.Context, key cid.Cid) (bool, error) { + return b.ronly.Has(ctx, key) +} + +func (b *ReadWrite) Get(ctx context.Context, key cid.Cid) (blocks.Block, error) { + return b.ronly.Get(ctx, key) +} + +func (b *ReadWrite) GetSize(ctx context.Context, key cid.Cid) (int, error) { + return b.ronly.GetSize(ctx, key) +} + +func (b *ReadWrite) DeleteBlock(_ context.Context, _ cid.Cid) error { + return fmt.Errorf("ReadWrite blockstore does not support deleting blocks") +} + +func (b *ReadWrite) HashOnRead(enable bool) { + b.ronly.HashOnRead(enable) +} + +func (b *ReadWrite) Roots() ([]cid.Cid, error) { + return b.ronly.Roots() +} diff --git a/cmd/lib/stores/ro_bstores.go b/cmd/lib/stores/ro_bstores.go new file mode 100644 index 000000000..e4fcf95a0 --- /dev/null +++ b/cmd/lib/stores/ro_bstores.go @@ -0,0 +1,60 @@ +package stores + +import ( + "io" + "sync" + + bstore "github.com/ipfs/boxo/blockstore" + "golang.org/x/xerrors" +) + +// ReadOnlyBlockstores tracks open read blockstores. 
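+//
+// A minimal usage sketch (hedged; the key string and the bs blockstore are placeholders supplied
+// by the caller):
+//
+//	robs := NewReadOnlyBlockstores()
+//	isNew, _ := robs.Track("piece-key", bs) // false if the key is already tracked
+//	got, err := robs.Get("piece-key")       // err wraps ErrNotFound for unknown keys
+//	_ = robs.Untrack("piece-key")           // closes the blockstore if it implements io.Closer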
+type ReadOnlyBlockstores struct { + mu sync.RWMutex + stores map[string]bstore.Blockstore +} + +func NewReadOnlyBlockstores() *ReadOnlyBlockstores { + return &ReadOnlyBlockstores{ + stores: make(map[string]bstore.Blockstore), + } +} + +func (r *ReadOnlyBlockstores) Track(key string, bs bstore.Blockstore) (bool, error) { + r.mu.Lock() + defer r.mu.Unlock() + + if _, ok := r.stores[key]; ok { + return false, nil + } + + r.stores[key] = bs + return true, nil +} + +func (r *ReadOnlyBlockstores) Get(key string) (bstore.Blockstore, error) { + r.mu.RLock() + defer r.mu.RUnlock() + + if bs, ok := r.stores[key]; ok { + return bs, nil + } + + return nil, xerrors.Errorf("could not get blockstore for key %s: %w", key, ErrNotFound) +} + +func (r *ReadOnlyBlockstores) Untrack(key string) error { + r.mu.Lock() + defer r.mu.Unlock() + + if bs, ok := r.stores[key]; ok { + delete(r.stores, key) + if closer, ok := bs.(io.Closer); ok { + if err := closer.Close(); err != nil { + return xerrors.Errorf("failed to close read-only blockstore: %w", err) + } + } + } + + return nil +} diff --git a/cmd/lib/stores/rw_bstores.go b/cmd/lib/stores/rw_bstores.go new file mode 100644 index 000000000..526a16983 --- /dev/null +++ b/cmd/lib/stores/rw_bstores.go @@ -0,0 +1,63 @@ +package stores + +import ( + "sync" + + "github.com/ipfs/go-cid" + carv2 "github.com/ipld/go-car/v2" + "github.com/ipld/go-car/v2/blockstore" + "golang.org/x/xerrors" +) + +// ReadWriteBlockstores tracks open ReadWrite CAR blockstores. +type ReadWriteBlockstores struct { + mu sync.RWMutex + stores map[string]*blockstore.ReadWrite +} + +func NewReadWriteBlockstores() *ReadWriteBlockstores { + return &ReadWriteBlockstores{ + stores: make(map[string]*blockstore.ReadWrite), + } +} + +func (r *ReadWriteBlockstores) Get(key string) (*blockstore.ReadWrite, error) { + r.mu.RLock() + defer r.mu.RUnlock() + + if bs, ok := r.stores[key]; ok { + return bs, nil + } + return nil, xerrors.Errorf("could not get blockstore for key %s: %w", key, ErrNotFound) +} + +func (r *ReadWriteBlockstores) GetOrOpen(key string, path string, rootCid cid.Cid) (*blockstore.ReadWrite, error) { + r.mu.Lock() + defer r.mu.Unlock() + + if bs, ok := r.stores[key]; ok { + return bs, nil + } + + bs, err := blockstore.OpenReadWrite(path, []cid.Cid{rootCid}, blockstore.UseWholeCIDs(true), carv2.StoreIdentityCIDs(true)) + if err != nil { + return nil, xerrors.Errorf("failed to create read-write blockstore: %w", err) + } + r.stores[key] = bs + return bs, nil +} + +func (r *ReadWriteBlockstores) Untrack(key string) error { + r.mu.Lock() + defer r.mu.Unlock() + + if bs, ok := r.stores[key]; ok { + // If the blockstore has already been finalized, calling Finalize again + // will return an error. For our purposes it's simplest if Finalize is + // idempotent so we just ignore any error. 
+ _ = bs.Finalize() + } + + delete(r.stores, key) + return nil +} diff --git a/cmd/migrate-lid/migrate_lid.go b/cmd/migrate-lid/migrate_lid.go index c27e1d57b..edcd5586f 100644 --- a/cmd/migrate-lid/migrate_lid.go +++ b/cmd/migrate-lid/migrate_lid.go @@ -12,9 +12,10 @@ import ( "sync/atomic" "time" - "github.com/filecoin-project/boost-gfm/piecestore" "github.com/filecoin-project/boost/cmd/lib" "github.com/filecoin-project/boost/db" + "github.com/filecoin-project/boost/markets/piecestore" + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" "github.com/filecoin-project/boostd-data/ldb" "github.com/filecoin-project/boostd-data/model" "github.com/filecoin-project/boostd-data/svc" @@ -22,7 +23,6 @@ import ( "github.com/filecoin-project/boostd-data/yugabyte" "github.com/filecoin-project/boostd-data/yugabyte/migrations" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-state-types/abi" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/node/modules" @@ -759,7 +759,7 @@ func migrateReversePiece(ctx context.Context, pieceCid cid.Cid, pieceDir StoreMi var pieceStoreDeals []piecestore.DealInfo pieceStorePieceInfo, err := ps.GetPieceInfo(pieceCid) if err != nil { - if !errors.Is(err, retrievalmarket.ErrNotFound) { + if !errors.Is(err, legacyretrievaltypes.ErrNotFound) { return 0, fmt.Errorf("getting piece info from piece store for piece %s", pieceCid) } } else { diff --git a/datatransfer/channelmonitor/channelmonitor.go b/datatransfer/channelmonitor/channelmonitor.go new file mode 100644 index 000000000..205e66163 --- /dev/null +++ b/datatransfer/channelmonitor/channelmonitor.go @@ -0,0 +1,509 @@ +package channelmonitor + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/bep/debounce" + logging "github.com/ipfs/go-log/v2" + "github.com/libp2p/go-libp2p/core/peer" + "golang.org/x/xerrors" + + "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/datatransfer/channels" +) + +var log = logging.Logger("dt-chanmon") + +type monitorAPI interface { + SubscribeToEvents(subscriber datatransfer.Subscriber) datatransfer.Unsubscribe + RestartDataTransferChannel(ctx context.Context, chid datatransfer.ChannelID) error + CloseDataTransferChannelWithError(ctx context.Context, chid datatransfer.ChannelID, cherr error) error + ConnectTo(context.Context, peer.ID) error + PeerID() peer.ID +} + +// Monitor watches the events for data transfer channels, and restarts +// a channel if there are timeouts / errors +type Monitor struct { + ctx context.Context + stop context.CancelFunc + mgr monitorAPI + cfg *Config + + lk sync.RWMutex + channels map[datatransfer.ChannelID]*monitoredChannel +} + +type Config struct { + // Max time to wait for other side to accept open channel request before attempting restart. + // Set to 0 to disable timeout. + AcceptTimeout time.Duration + // Debounce when restart is triggered by multiple errors + RestartDebounce time.Duration + // Backoff after restarting + RestartBackoff time.Duration + // Number of times to try to restart before failing + MaxConsecutiveRestarts uint32 + // Max time to wait for the responder to send a Complete message once all + // data has been sent. + // Set to 0 to disable timeout. 
+ CompleteTimeout time.Duration + // Called when a restart completes successfully + OnRestartComplete func(id datatransfer.ChannelID) +} + +func NewMonitor(mgr monitorAPI, cfg *Config) *Monitor { + checkConfig(cfg) + ctx, cancel := context.WithCancel(context.Background()) + return &Monitor{ + ctx: ctx, + stop: cancel, + mgr: mgr, + cfg: cfg, + channels: make(map[datatransfer.ChannelID]*monitoredChannel), + } +} + +func checkConfig(cfg *Config) { + if cfg == nil { + return + } + + prefix := "data-transfer channel monitor config " + if cfg.AcceptTimeout < 0 { + panic(fmt.Sprintf(prefix+"AcceptTimeout is %s but must be >= 0", cfg.AcceptTimeout)) + } + if cfg.MaxConsecutiveRestarts == 0 { + panic(fmt.Sprintf(prefix+"MaxConsecutiveRestarts is %d but must be > 0", cfg.MaxConsecutiveRestarts)) + } + if cfg.CompleteTimeout < 0 { + panic(fmt.Sprintf(prefix+"CompleteTimeout is %s but must be >= 0", cfg.CompleteTimeout)) + } +} + +// AddPushChannel adds a push channel to the channel monitor +func (m *Monitor) AddPushChannel(chid datatransfer.ChannelID) *monitoredChannel { + return m.addChannel(chid, true) +} + +// AddPullChannel adds a pull channel to the channel monitor +func (m *Monitor) AddPullChannel(chid datatransfer.ChannelID) *monitoredChannel { + return m.addChannel(chid, false) +} + +// addChannel adds a channel to the channel monitor +func (m *Monitor) addChannel(chid datatransfer.ChannelID, isPush bool) *monitoredChannel { + if !m.enabled() { + return nil + } + + m.lk.Lock() + defer m.lk.Unlock() + + // Check if there is already a monitor for this channel + if _, ok := m.channels[chid]; ok { + tp := "push" + if !isPush { + tp = "pull" + } + log.Warnf("ignoring add %s channel %s: %s channel with that id already exists", + tp, chid, tp) + return nil + } + + mpc := newMonitoredChannel(m.ctx, m.mgr, chid, m.cfg, m.onMonitoredChannelShutdown) + m.channels[chid] = mpc + return mpc +} + +func (m *Monitor) Shutdown() { + // Cancel the context for the Monitor + m.stop() +} + +// onShutdown shuts down all monitored channels. It is called when the run +// loop exits. 
+func (m *Monitor) onShutdown() { + m.lk.RLock() + defer m.lk.RUnlock() + + for _, ch := range m.channels { + ch.Shutdown() + } +} + +// onMonitoredChannelShutdown is called when a monitored channel shuts down +func (m *Monitor) onMonitoredChannelShutdown(chid datatransfer.ChannelID) { + m.lk.Lock() + defer m.lk.Unlock() + + delete(m.channels, chid) +} + +// enabled indicates whether the channel monitor is running +func (m *Monitor) enabled() bool { + return m.cfg != nil +} + +// monitoredChannel keeps track of events for a channel, and +// restarts the channel if there are connection issues +type monitoredChannel struct { + // The parentCtx is used when sending a close message for a channel, so + // that operation can continue even after the monitoredChannel is shutdown + parentCtx context.Context + ctx context.Context + cancel context.CancelFunc + mgr monitorAPI + chid datatransfer.ChannelID + cfg *Config + unsub datatransfer.Unsubscribe + restartChannelDebounced func(error) + onShutdown func(datatransfer.ChannelID) + shutdownLk sync.Mutex + + restartLk sync.RWMutex + restartedAt time.Time + restartQueued bool + consecutiveRestarts int +} + +func newMonitoredChannel( + parentCtx context.Context, + mgr monitorAPI, + chid datatransfer.ChannelID, + cfg *Config, + onShutdown func(datatransfer.ChannelID), +) *monitoredChannel { + ctx, cancel := context.WithCancel(context.Background()) + mpc := &monitoredChannel{ + parentCtx: parentCtx, + ctx: ctx, + cancel: cancel, + mgr: mgr, + chid: chid, + cfg: cfg, + onShutdown: onShutdown, + } + + // "debounce" calls to restart channel, ie if there are multiple calls in a + // short space of time, only send a message to restart the channel once + var lk sync.Mutex + var lastErr error + debouncer := debounce.New(cfg.RestartDebounce) + mpc.restartChannelDebounced = func(err error) { + // Log the error at debug level + log.Debug(err.Error()) + + // Save the last error passed to restartChannelDebounced + lk.Lock() + lastErr = err + lk.Unlock() + + debouncer(func() { + // Log only the last error passed to restartChannelDebounced at warning level + lk.Lock() + log.Warnf("%s", lastErr) + lk.Unlock() + + // Restart the channel + mpc.restartChannel() + }) + } + + // Start monitoring the channel + mpc.start() + return mpc +} + +// Cancel the context and unsubscribe from events. +// Returns true if channel has not already been shutdown. 
+func (mc *monitoredChannel) Shutdown() bool { + mc.shutdownLk.Lock() + defer mc.shutdownLk.Unlock() + + // Check if the channel was already shut down + if mc.cancel == nil { + return false + } + mc.cancel() // cancel context so all go-routines exit + mc.cancel = nil + + // unsubscribe from data transfer events + mc.unsub() + + // Inform the Manager that this channel has shut down + go mc.onShutdown(mc.chid) + + return true +} + +func (mc *monitoredChannel) start() { + // Prevent shutdown until after startup + mc.shutdownLk.Lock() + defer mc.shutdownLk.Unlock() + + log.Debugf("%s: starting data-transfer channel monitoring", mc.chid) + + // Watch to make sure the responder accepts the channel in time + cancelAcceptTimer := mc.watchForResponderAccept() + + // Watch for data-transfer channel events + mc.unsub = mc.mgr.SubscribeToEvents(func(event datatransfer.Event, channelState datatransfer.ChannelState) { + if channelState.ChannelID() != mc.chid { + return + } + + // Once the channel completes, shut down the monitor + state := channelState.Status() + if channels.IsChannelCleaningUp(state) || channels.IsChannelTerminated(state) { + log.Debugf("%s: stopping data-transfer channel monitoring (event: %s / state: %s)", + mc.chid, datatransfer.Events[event.Code], datatransfer.Statuses[channelState.Status()]) + go mc.Shutdown() + return + } + + switch event.Code { + case datatransfer.Accept: + // The Accept event is fired when we receive an Accept message from the responder + cancelAcceptTimer() + case datatransfer.SendDataError: + // If the transport layer reports an error sending data over the wire, + // attempt to restart the channel + err := xerrors.Errorf("%s: data transfer transport send error, restarting data transfer", mc.chid) + go mc.restartChannelDebounced(err) + case datatransfer.ReceiveDataError: + // If the transport layer reports an error receiving data over the wire, + // attempt to restart the channel + err := xerrors.Errorf("%s: data transfer transport receive error, restarting data transfer", mc.chid) + go mc.restartChannelDebounced(err) + case datatransfer.FinishTransfer: + // The channel initiator has finished sending / receiving all data. + // Watch to make sure that the responder sends a message to acknowledge + // that the transfer is complete + go mc.watchForResponderComplete() + case datatransfer.DataSent, datatransfer.DataReceived: + // Some data was sent / received so reset the consecutive restart + // counter + mc.resetConsecutiveRestarts() + } + }) +} + +// watchForResponderAccept watches to make sure that the responder sends +// an Accept to our open channel request before the accept timeout. +// Returns a function that can be used to cancel the timer. 
+func (mc *monitoredChannel) watchForResponderAccept() func() { + // Check if the accept timeout is disabled + if mc.cfg.AcceptTimeout == 0 { + return func() {} + } + + // Start a timer for the accept timeout + timer := time.NewTimer(mc.cfg.AcceptTimeout) + + go func() { + defer timer.Stop() + + select { + case <-mc.ctx.Done(): + case <-timer.C: + // Timer expired before we received an Accept from the responder, + // fail the data transfer + err := xerrors.Errorf("%s: timed out waiting %s for Accept message from remote peer", + mc.chid, mc.cfg.AcceptTimeout) + mc.closeChannelAndShutdown(err) + } + }() + + return func() { timer.Stop() } +} + +// Wait up to the configured timeout for the responder to send a Complete message +func (mc *monitoredChannel) watchForResponderComplete() { + // Check if the complete timeout is disabled + if mc.cfg.CompleteTimeout == 0 { + return + } + + // Start a timer for the complete timeout + timer := time.NewTimer(mc.cfg.CompleteTimeout) + defer timer.Stop() + + select { + case <-mc.ctx.Done(): + // When the Complete message is received, the channel shuts down and + // its context is cancelled + case <-timer.C: + // Timer expired before we received a Complete message from the responder + err := xerrors.Errorf("%s: timed out waiting %s for Complete message from remote peer", + mc.chid, mc.cfg.CompleteTimeout) + mc.closeChannelAndShutdown(err) + } +} + +// clear the consecutive restart count (we do this when data is sent or +// received) +func (mc *monitoredChannel) resetConsecutiveRestarts() { + mc.restartLk.Lock() + defer mc.restartLk.Unlock() + + mc.consecutiveRestarts = 0 +} + +// Used by the tests +func (mc *monitoredChannel) isRestarting() bool { + mc.restartLk.Lock() + defer mc.restartLk.Unlock() + + return !mc.restartedAt.IsZero() +} + +// Send a restart message for the channel +func (mc *monitoredChannel) restartChannel() { + var restartedAt time.Time + mc.restartLk.Lock() + { + restartedAt = mc.restartedAt + if mc.restartedAt.IsZero() { + // If there is not already a restart in progress, we'll restart now + mc.restartedAt = time.Now() + } else { + // There is already a restart in progress, so queue up a restart + // for after the current one has completed + mc.restartQueued = true + } + } + mc.restartLk.Unlock() + + // Check if channel is already being restarted + if !restartedAt.IsZero() { + log.Debugf("%s: restart called but already restarting channel, "+ + "waiting to restart again (since %s; restart backoff is %s)", + mc.chid, time.Since(restartedAt), mc.cfg.RestartBackoff) + return + } + + for { + // Send the restart message + err := mc.doRestartChannel() + if err != nil { + // If there was an error restarting, close the channel and shutdown + // the monitor + mc.closeChannelAndShutdown(err) + return + } + + // Restart has completed, check if there's another restart queued up + restartAgain := false + mc.restartLk.Lock() + { + if mc.restartQueued { + // There's another restart queued up, so reset the restart time + // to now + mc.restartedAt = time.Now() + restartAgain = true + mc.restartQueued = false + } else { + // No other restarts queued up, so clear the restart time + mc.restartedAt = time.Time{} + } + } + mc.restartLk.Unlock() + + if !restartAgain { + // No restart queued, we're done + if mc.cfg.OnRestartComplete != nil { + mc.cfg.OnRestartComplete(mc.chid) + } + return + } + + // There was a restart queued, restart again + log.Debugf("%s: restart was queued - restarting again", mc.chid) + } +} + +func (mc *monitoredChannel) 
doRestartChannel() error { + // Keep track of the number of consecutive restarts with no data + // transferred + mc.restartLk.Lock() + mc.consecutiveRestarts++ + restartCount := mc.consecutiveRestarts + mc.restartLk.Unlock() + + if uint32(restartCount) > mc.cfg.MaxConsecutiveRestarts { + // If no data has been transferred since the last restart, and we've + // reached the consecutive restart limit, return an error + return xerrors.Errorf("%s: after %d consecutive restarts failed to transfer any data", mc.chid, restartCount) + } + + // Send the restart message + log.Debugf("%s: restarting (%d consecutive restarts)", mc.chid, restartCount) + err := mc.sendRestartMessage(restartCount) + if err != nil { + log.Warnf("%s: restart failed, trying again: %s", mc.chid, err) + // If the restart message could not be sent, try again + return mc.doRestartChannel() + } + log.Infof("%s: restart completed successfully", mc.chid) + + return nil +} + +func (mc *monitoredChannel) sendRestartMessage(restartCount int) error { + // Establish a connection to the peer, in case the connection went down. + // Note that at the networking layer there is logic to retry if a network + // connection cannot be established, so this may take some time. + p := mc.chid.OtherParty(mc.mgr.PeerID()) + log.Debugf("%s: re-establishing connection to %s", mc.chid, p) + start := time.Now() + err := mc.mgr.ConnectTo(mc.ctx, p) + if err != nil { + return xerrors.Errorf("%s: failed to reconnect to peer %s after %s: %w", + mc.chid, p, time.Since(start), err) + } + log.Debugf("%s: re-established connection to %s in %s", mc.chid, p, time.Since(start)) + + // Send a restart message for the channel + log.Debugf("%s: sending restart message to %s (%d consecutive restarts)", mc.chid, p, restartCount) + err = mc.mgr.RestartDataTransferChannel(mc.ctx, mc.chid) + if err != nil { + return xerrors.Errorf("%s: failed to send restart message to %s: %w", mc.chid, p, err) + } + + // The restart message was sent successfully. + // If a restart backoff is configured, backoff after a restart before + // attempting another. 
+ if mc.cfg.RestartBackoff > 0 { + log.Debugf("%s: backing off %s before allowing any other restarts", + mc.chid, mc.cfg.RestartBackoff) + select { + case <-time.After(mc.cfg.RestartBackoff): + log.Debugf("%s: restart back-off of %s complete", mc.chid, mc.cfg.RestartBackoff) + case <-mc.ctx.Done(): + return nil + } + } + + return nil +} + +// Shut down the monitor and close the data transfer channel +func (mc *monitoredChannel) closeChannelAndShutdown(cherr error) { + // Shutdown the monitor + firstShutdown := mc.Shutdown() + if !firstShutdown { + // Channel was already shutdown, ignore this second attempt to shutdown + return + } + + // Close the data transfer channel and fire an error + log.Errorf("%s: closing data-transfer channel: %s", mc.chid, cherr) + err := mc.mgr.CloseDataTransferChannelWithError(mc.parentCtx, mc.chid, cherr) + if err != nil { + log.Errorf("error closing data-transfer channel %s: %s", mc.chid, err) + } +} diff --git a/datatransfer/channels/block_index_cache.go b/datatransfer/channels/block_index_cache.go new file mode 100644 index 000000000..42142a30f --- /dev/null +++ b/datatransfer/channels/block_index_cache.go @@ -0,0 +1,63 @@ +package channels + +import ( + "sync" + "sync/atomic" + + "github.com/filecoin-project/boost/datatransfer" +) + +type readOriginalFn func(datatransfer.ChannelID) (int64, error) + +type blockIndexKey struct { + evt datatransfer.EventCode + chid datatransfer.ChannelID +} +type blockIndexCache struct { + lk sync.RWMutex + values map[blockIndexKey]*int64 +} + +func newBlockIndexCache() *blockIndexCache { + return &blockIndexCache{ + values: make(map[blockIndexKey]*int64), + } +} + +func (bic *blockIndexCache) getValue(evt datatransfer.EventCode, chid datatransfer.ChannelID, readFromOriginal readOriginalFn) (*int64, error) { + idxKey := blockIndexKey{evt, chid} + bic.lk.RLock() + value := bic.values[idxKey] + bic.lk.RUnlock() + if value != nil { + return value, nil + } + bic.lk.Lock() + defer bic.lk.Unlock() + value = bic.values[idxKey] + if value != nil { + return value, nil + } + newValue, err := readFromOriginal(chid) + if err != nil { + return nil, err + } + bic.values[idxKey] = &newValue + return &newValue, nil +} + +func (bic *blockIndexCache) updateIfGreater(evt datatransfer.EventCode, chid datatransfer.ChannelID, newIndex int64, readFromOriginal readOriginalFn) (bool, error) { + value, err := bic.getValue(evt, chid, readFromOriginal) + if err != nil { + return false, err + } + for { + currentIndex := atomic.LoadInt64(value) + if newIndex <= currentIndex { + return false, nil + } + if atomic.CompareAndSwapInt64(value, currentIndex, newIndex) { + return true, nil + } + } +} diff --git a/datatransfer/channels/channel_state.go b/datatransfer/channels/channel_state.go new file mode 100644 index 000000000..3de3baa59 --- /dev/null +++ b/datatransfer/channels/channel_state.go @@ -0,0 +1,251 @@ +package channels + +import ( + "bytes" + + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/libp2p/go-libp2p/core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/datatransfer/channels/internal" +) + +// channelState is immutable channel data plus mutable state +type channelState struct { + // peerId of the manager peer + selfPeer peer.ID + // an identifier for this channel shared by request and responder, set by requester through protocol + 
transferID datatransfer.TransferID + // base CID for the piece being transferred + baseCid cid.Cid + // portion of Piece to return, specified by an IPLD selector + selector *cbg.Deferred + // the party that is sending the data (not who initiated the request) + sender peer.ID + // the party that is receiving the data (not who initiated the request) + recipient peer.ID + // expected amount of data to be transferred + totalSize uint64 + // current status of this deal + status datatransfer.Status + // isPull indicates if this is a push or pull request + isPull bool + // total bytes read from this node and queued for sending (0 if receiver) + queued uint64 + // total bytes sent from this node (0 if receiver) + sent uint64 + // total bytes received by this node (0 if sender) + received uint64 + // number of blocks that have been received, including blocks that are + // present in more than one place in the DAG + receivedBlocksTotal int64 + // Number of blocks that have been queued, including blocks that are + // present in more than one place in the DAG + queuedBlocksTotal int64 + // Number of blocks that have been sent, including blocks that are + // present in more than one place in the DAG + sentBlocksTotal int64 + // more informative status on a channel + message string + // additional vouchers + vouchers []internal.EncodedVoucher + // additional voucherResults + voucherResults []internal.EncodedVoucherResult + voucherResultDecoder DecoderByTypeFunc + voucherDecoder DecoderByTypeFunc + + // stages tracks the timeline of events related to a data transfer, for + // traceability purposes. + stages *datatransfer.ChannelStages +} + +// EmptyChannelState is the zero value for channel state, meaning not present +var EmptyChannelState = channelState{} + +// Status is the current status of this channel +func (c channelState) Status() datatransfer.Status { return c.status } + +// Received returns the number of bytes received +func (c channelState) Queued() uint64 { return c.queued } + +// Sent returns the number of bytes sent +func (c channelState) Sent() uint64 { return c.sent } + +// Received returns the number of bytes received +func (c channelState) Received() uint64 { return c.received } + +// TransferID returns the transfer id for this channel +func (c channelState) TransferID() datatransfer.TransferID { return c.transferID } + +// BaseCID returns the CID that is at the root of this data transfer +func (c channelState) BaseCID() cid.Cid { return c.baseCid } + +// Selector returns the IPLD selector for this data transfer (represented as +// an IPLD node) +func (c channelState) Selector() ipld.Node { + builder := basicnode.Prototype.Any.NewBuilder() + reader := bytes.NewReader(c.selector.Raw) + err := dagcbor.Decode(builder, reader) + if err != nil { + log.Error(err) + } + return builder.Build() +} + +// Voucher returns the voucher for this data transfer +func (c channelState) Voucher() datatransfer.Voucher { + if len(c.vouchers) == 0 { + return nil + } + decoder, has := c.voucherDecoder(c.vouchers[0].Type) + if !has { + return nil + } + encodable, _ := decoder.DecodeFromCbor(c.vouchers[0].Voucher.Raw) + return encodable.(datatransfer.Voucher) +} + +// ReceivedCidsTotal returns the number of (non-unique) cids received so far +// on the channel - note that a block can exist in more than one place in the DAG +func (c channelState) ReceivedCidsTotal() int64 { + return c.receivedBlocksTotal +} + +// QueuedCidsTotal returns the number of (non-unique) cids queued so far +// on the channel - note that a 
block can exist in more than one place in the DAG +func (c channelState) QueuedCidsTotal() int64 { + return c.queuedBlocksTotal +} + +// SentCidsTotal returns the number of (non-unique) cids sent so far +// on the channel - note that a block can exist in more than one place in the DAG +func (c channelState) SentCidsTotal() int64 { + return c.sentBlocksTotal +} + +// Sender returns the peer id for the node that is sending data +func (c channelState) Sender() peer.ID { return c.sender } + +// Recipient returns the peer id for the node that is receiving data +func (c channelState) Recipient() peer.ID { return c.recipient } + +// TotalSize returns the total size for the data being transferred +func (c channelState) TotalSize() uint64 { return c.totalSize } + +// IsPull returns whether this is a pull request based on who initiated it +func (c channelState) IsPull() bool { + return c.isPull +} + +func (c channelState) ChannelID() datatransfer.ChannelID { + if c.isPull { + return datatransfer.ChannelID{ID: c.transferID, Initiator: c.recipient, Responder: c.sender} + } + return datatransfer.ChannelID{ID: c.transferID, Initiator: c.sender, Responder: c.recipient} +} + +func (c channelState) Message() string { + return c.message +} + +func (c channelState) Vouchers() []datatransfer.Voucher { + vouchers := make([]datatransfer.Voucher, 0, len(c.vouchers)) + for _, encoded := range c.vouchers { + decoder, has := c.voucherDecoder(encoded.Type) + if !has { + continue + } + encodable, _ := decoder.DecodeFromCbor(encoded.Voucher.Raw) + vouchers = append(vouchers, encodable.(datatransfer.Voucher)) + } + return vouchers +} + +func (c channelState) LastVoucher() datatransfer.Voucher { + decoder, has := c.voucherDecoder(c.vouchers[len(c.vouchers)-1].Type) + if !has { + return nil + } + encodable, _ := decoder.DecodeFromCbor(c.vouchers[len(c.vouchers)-1].Voucher.Raw) + return encodable.(datatransfer.Voucher) +} + +func (c channelState) LastVoucherResult() datatransfer.VoucherResult { + decoder, has := c.voucherResultDecoder(c.voucherResults[len(c.voucherResults)-1].Type) + if !has { + return nil + } + encodable, _ := decoder.DecodeFromCbor(c.voucherResults[len(c.voucherResults)-1].VoucherResult.Raw) + return encodable.(datatransfer.VoucherResult) +} + +func (c channelState) VoucherResults() []datatransfer.VoucherResult { + voucherResults := make([]datatransfer.VoucherResult, 0, len(c.voucherResults)) + for _, encoded := range c.voucherResults { + decoder, has := c.voucherResultDecoder(encoded.Type) + if !has { + continue + } + encodable, _ := decoder.DecodeFromCbor(encoded.VoucherResult.Raw) + voucherResults = append(voucherResults, encodable.(datatransfer.VoucherResult)) + } + return voucherResults +} + +func (c channelState) SelfPeer() peer.ID { + return c.selfPeer +} + +func (c channelState) OtherPeer() peer.ID { + if c.sender == c.selfPeer { + return c.recipient + } + return c.sender +} + +// Stages returns the current ChannelStages object, or an empty object. +// It is unsafe for the caller to modify the return value, and changes may not +// be persisted. It should be treated as immutable. +// +// EXPERIMENTAL; subject to change. +func (c channelState) Stages() *datatransfer.ChannelStages { + if c.stages == nil { + // return an empty placeholder; it will be discarded because the caller + // is not supposed to mutate the value anyway. 
+ return &datatransfer.ChannelStages{} + } + + return c.stages +} + +func fromInternalChannelState(c internal.ChannelState, voucherDecoder DecoderByTypeFunc, voucherResultDecoder DecoderByTypeFunc) datatransfer.ChannelState { + return channelState{ + selfPeer: c.SelfPeer, + isPull: c.Initiator == c.Recipient, + transferID: c.TransferID, + baseCid: c.BaseCid, + selector: c.Selector, + sender: c.Sender, + recipient: c.Recipient, + totalSize: c.TotalSize, + status: c.Status, + queued: c.Queued, + sent: c.Sent, + received: c.Received, + receivedBlocksTotal: c.ReceivedBlocksTotal, + queuedBlocksTotal: c.QueuedBlocksTotal, + sentBlocksTotal: c.SentBlocksTotal, + message: c.Message, + vouchers: c.Vouchers, + voucherResults: c.VoucherResults, + voucherResultDecoder: voucherResultDecoder, + voucherDecoder: voucherDecoder, + stages: c.Stages, + } +} + +var _ datatransfer.ChannelState = channelState{} diff --git a/datatransfer/channels/channels.go b/datatransfer/channels/channels.go new file mode 100644 index 000000000..67d98332f --- /dev/null +++ b/datatransfer/channels/channels.go @@ -0,0 +1,414 @@ +package channels + +import ( + "context" + "errors" + "time" + + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipld/go-ipld-prime" + "github.com/libp2p/go-libp2p/core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + versioning "github.com/filecoin-project/go-ds-versioning/pkg" + versionedfsm "github.com/filecoin-project/go-ds-versioning/pkg/fsm" + "github.com/filecoin-project/go-statemachine" + "github.com/filecoin-project/go-statemachine/fsm" + + "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/datatransfer/channels/internal" + "github.com/filecoin-project/boost/datatransfer/channels/internal/migrations" + "github.com/filecoin-project/boost/datatransfer/encoding" +) + +type DecoderByTypeFunc func(identifier datatransfer.TypeIdentifier) (encoding.Decoder, bool) + +type Notifier func(datatransfer.Event, datatransfer.ChannelState) + +// ErrNotFound is returned when a channel cannot be found with a given channel ID +type ErrNotFound struct { + ChannelID datatransfer.ChannelID +} + +func (e *ErrNotFound) Error() string { + return "No channel for channel ID " + e.ChannelID.String() +} + +func NewErrNotFound(chid datatransfer.ChannelID) error { + return &ErrNotFound{ChannelID: chid} +} + +// ErrWrongType is returned when a caller attempts to change the type of implementation data after setting it +var ErrWrongType = errors.New("Cannot change type of implementation specific data after setting it") + +// Channels is a thread safe list of channels +type Channels struct { + notifier Notifier + voucherDecoder DecoderByTypeFunc + voucherResultDecoder DecoderByTypeFunc + blockIndexCache *blockIndexCache + stateMachines fsm.Group + migrateStateMachines func(context.Context) error +} + +// ChannelEnvironment -- just a proxy for DTNetwork for now +type ChannelEnvironment interface { + Protect(id peer.ID, tag string) + Unprotect(id peer.ID, tag string) bool + ID() peer.ID + CleanupChannel(chid datatransfer.ChannelID) +} + +// New returns a new thread safe list of channels +func New(ds datastore.Batching, + notifier Notifier, + voucherDecoder DecoderByTypeFunc, + voucherResultDecoder DecoderByTypeFunc, + env ChannelEnvironment, + selfPeer peer.ID) (*Channels, error) { + + c := &Channels{ + notifier: notifier, + voucherDecoder: voucherDecoder, + voucherResultDecoder: voucherResultDecoder, + } + c.blockIndexCache = 
newBlockIndexCache() + channelMigrations, err := migrations.GetChannelStateMigrations(selfPeer) + if err != nil { + return nil, err + } + c.stateMachines, c.migrateStateMachines, err = versionedfsm.NewVersionedFSM(ds, fsm.Parameters{ + Environment: env, + StateType: internal.ChannelState{}, + StateKeyField: "Status", + Events: ChannelEvents, + StateEntryFuncs: ChannelStateEntryFuncs, + Notifier: c.dispatch, + FinalityStates: ChannelFinalityStates, + }, channelMigrations, versioning.VersionKey("2")) + if err != nil { + return nil, err + } + return c, nil +} + +// Start migrates the channel data store as needed +func (c *Channels) Start(ctx context.Context) error { + return c.migrateStateMachines(ctx) +} + +func (c *Channels) dispatch(eventName fsm.EventName, channel fsm.StateType) { + evtCode, ok := eventName.(datatransfer.EventCode) + if !ok { + log.Errorf("dropped bad event %v", eventName) + } + realChannel, ok := channel.(internal.ChannelState) + if !ok { + log.Errorf("not a ClientDeal %v", channel) + } + evt := datatransfer.Event{ + Code: evtCode, + Message: realChannel.Message, + Timestamp: time.Now(), + } + log.Debugw("process data transfer listeners", "name", datatransfer.Events[evtCode], "transfer ID", realChannel.TransferID) + c.notifier(evt, c.fromInternalChannelState(realChannel)) +} + +// CreateNew creates a new channel id and channel state and saves to channels. +// returns error if the channel exists already. +func (c *Channels) CreateNew(selfPeer peer.ID, tid datatransfer.TransferID, baseCid cid.Cid, selector ipld.Node, voucher datatransfer.Voucher, initiator, dataSender, dataReceiver peer.ID) (datatransfer.ChannelID, error) { + var responder peer.ID + if dataSender == initiator { + responder = dataReceiver + } else { + responder = dataSender + } + chid := datatransfer.ChannelID{Initiator: initiator, Responder: responder, ID: tid} + voucherBytes, err := encoding.Encode(voucher) + if err != nil { + return datatransfer.ChannelID{}, err + } + selBytes, err := encoding.Encode(selector) + if err != nil { + return datatransfer.ChannelID{}, err + } + err = c.stateMachines.Begin(chid, &internal.ChannelState{ + SelfPeer: selfPeer, + TransferID: tid, + Initiator: initiator, + Responder: responder, + BaseCid: baseCid, + Selector: &cbg.Deferred{Raw: selBytes}, + Sender: dataSender, + Recipient: dataReceiver, + Stages: &datatransfer.ChannelStages{}, + Vouchers: []internal.EncodedVoucher{ + { + Type: voucher.Type(), + Voucher: &cbg.Deferred{ + Raw: voucherBytes, + }, + }, + }, + Status: datatransfer.Requested, + }) + if err != nil { + log.Errorw("failed to create new tracking channel for data-transfer", "channelID", chid, "err", err) + return datatransfer.ChannelID{}, err + } + log.Debugw("created tracking channel for data-transfer, emitting channel Open event", "channelID", chid) + return chid, c.stateMachines.Send(chid, datatransfer.Open) +} + +// InProgress returns a list of in progress channels +func (c *Channels) InProgress() (map[datatransfer.ChannelID]datatransfer.ChannelState, error) { + var internalChannels []internal.ChannelState + err := c.stateMachines.List(&internalChannels) + if err != nil { + return nil, err + } + channels := make(map[datatransfer.ChannelID]datatransfer.ChannelState, len(internalChannels)) + for _, internalChannel := range internalChannels { + channels[datatransfer.ChannelID{ID: internalChannel.TransferID, Responder: internalChannel.Responder, Initiator: internalChannel.Initiator}] = + c.fromInternalChannelState(internalChannel) + } + return channels, nil +} 
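+
+// Illustrative flow (a hedged sketch, not an exhaustive lifecycle): a manager typically creates a
+// channel, fires events against it as the transport makes progress, and reads back the derived
+// state. The peer IDs, voucher, selector node and block CID below are placeholders.
+//
+//	chid, _ := chs.CreateNew(self, tid, baseCid, selectorNode, voucher, self, self, remote)
+//	_ = chs.Accept(chid)                          // the responder accepted the request
+//	_, _ = chs.DataSent(chid, blkCid, 1024, 1, true)
+//	st, _ := chs.GetByID(ctx, chid)
+//	log.Infof("channel status: %s", datatransfer.Statuses[st.Status()])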
+
+// GetByID searches for a channel in the slice of channels with id `chid`.
+// Returns an ErrNotFound error if there is no channel with that id
+func (c *Channels) GetByID(ctx context.Context, chid datatransfer.ChannelID) (datatransfer.ChannelState, error) {
+	var internalChannel internal.ChannelState
+	err := c.stateMachines.GetSync(ctx, chid, &internalChannel)
+	if err != nil {
+		return nil, NewErrNotFound(chid)
+	}
+	return c.fromInternalChannelState(internalChannel), nil
+}
+
+// Accept marks a data transfer as accepted
+func (c *Channels) Accept(chid datatransfer.ChannelID) error {
+	return c.send(chid, datatransfer.Accept)
+}
+
+func (c *Channels) ChannelOpened(chid datatransfer.ChannelID) error {
+	return c.send(chid, datatransfer.Opened)
+}
+
+func (c *Channels) TransferRequestQueued(chid datatransfer.ChannelID) error {
+	return c.send(chid, datatransfer.TransferRequestQueued)
+}
+
+// Restart marks a data transfer as restarted
+func (c *Channels) Restart(chid datatransfer.ChannelID) error {
+	return c.send(chid, datatransfer.Restart)
+}
+
+func (c *Channels) CompleteCleanupOnRestart(chid datatransfer.ChannelID) error {
+	return c.send(chid, datatransfer.CompleteCleanupOnRestart)
+}
+
+func (c *Channels) getQueuedIndex(chid datatransfer.ChannelID) (int64, error) {
+	chst, err := c.GetByID(context.TODO(), chid)
+	if err != nil {
+		return 0, err
+	}
+	return chst.QueuedCidsTotal(), nil
+}
+
+func (c *Channels) getReceivedIndex(chid datatransfer.ChannelID) (int64, error) {
+	chst, err := c.GetByID(context.TODO(), chid)
+	if err != nil {
+		return 0, err
+	}
+	return chst.ReceivedCidsTotal(), nil
+}
+
+func (c *Channels) getSentIndex(chid datatransfer.ChannelID) (int64, error) {
+	chst, err := c.GetByID(context.TODO(), chid)
+	if err != nil {
+		return 0, err
+	}
+	return chst.SentCidsTotal(), nil
+}
+
+func (c *Channels) DataSent(chid datatransfer.ChannelID, k cid.Cid, delta uint64, index int64, unique bool) (bool, error) {
+	return c.fireProgressEvent(chid, datatransfer.DataSent, datatransfer.DataSentProgress, k, delta, index, unique, c.getSentIndex)
+}
+
+func (c *Channels) DataQueued(chid datatransfer.ChannelID, k cid.Cid, delta uint64, index int64, unique bool) (bool, error) {
+	return c.fireProgressEvent(chid, datatransfer.DataQueued, datatransfer.DataQueuedProgress, k, delta, index, unique, c.getQueuedIndex)
+}
+
+// DataReceived returns true if this is the first time the block has been received
+func (c *Channels) DataReceived(chid datatransfer.ChannelID, k cid.Cid, delta uint64, index int64, unique bool) (bool, error) {
+	new, err := c.fireProgressEvent(chid, datatransfer.DataReceived, datatransfer.DataReceivedProgress, k, delta, index, unique, c.getReceivedIndex)
+	return new, err
+}
+
+// PauseInitiator pauses the initiator of this channel
+func (c *Channels) PauseInitiator(chid datatransfer.ChannelID) error {
+	return c.send(chid, datatransfer.PauseInitiator)
+}
+
+// PauseResponder pauses the responder of this channel
+func (c *Channels) PauseResponder(chid datatransfer.ChannelID) error {
+	return c.send(chid, datatransfer.PauseResponder)
+}
+
+// ResumeInitiator resumes the initiator of this channel
+func (c *Channels) ResumeInitiator(chid datatransfer.ChannelID) error {
+	return c.send(chid, datatransfer.ResumeInitiator)
+}
+
+// ResumeResponder resumes the responder of this channel
+func (c *Channels) ResumeResponder(chid datatransfer.ChannelID) error {
+	return c.send(chid, datatransfer.ResumeResponder)
+}
+
+// NewVoucher records a new voucher for this channel
+func (c
*Channels) NewVoucher(chid datatransfer.ChannelID, voucher datatransfer.Voucher) error { + voucherBytes, err := encoding.Encode(voucher) + if err != nil { + return err + } + return c.send(chid, datatransfer.NewVoucher, voucher.Type(), voucherBytes) +} + +// NewVoucherResult records a new voucher result for this channel +func (c *Channels) NewVoucherResult(chid datatransfer.ChannelID, voucherResult datatransfer.VoucherResult) error { + voucherResultBytes, err := encoding.Encode(voucherResult) + if err != nil { + return err + } + return c.send(chid, datatransfer.NewVoucherResult, voucherResult.Type(), voucherResultBytes) +} + +// Complete indicates responder has completed sending/receiving data +func (c *Channels) Complete(chid datatransfer.ChannelID) error { + return c.send(chid, datatransfer.Complete) +} + +// FinishTransfer an initiator has finished sending/receiving data +func (c *Channels) FinishTransfer(chid datatransfer.ChannelID) error { + return c.send(chid, datatransfer.FinishTransfer) +} + +// ResponderCompletes indicates an initator has finished receiving data from a responder +func (c *Channels) ResponderCompletes(chid datatransfer.ChannelID) error { + return c.send(chid, datatransfer.ResponderCompletes) +} + +// ResponderBeginsFinalization indicates a responder has finished processing but is awaiting confirmation from the initiator +func (c *Channels) ResponderBeginsFinalization(chid datatransfer.ChannelID) error { + return c.send(chid, datatransfer.ResponderBeginsFinalization) +} + +// BeginFinalizing indicates a responder has finished processing but is awaiting confirmation from the initiator +func (c *Channels) BeginFinalizing(chid datatransfer.ChannelID) error { + return c.send(chid, datatransfer.BeginFinalizing) +} + +// Cancel indicates a channel was cancelled prematurely +func (c *Channels) Cancel(chid datatransfer.ChannelID) error { + err := c.send(chid, datatransfer.Cancel) + + // If there was an error because the state machine already terminated, + // sending a Cancel event is redundant anyway, so just ignore it + if errors.Is(err, statemachine.ErrTerminated) { + return nil + } + + return err +} + +// Error indicates something that went wrong on a channel +func (c *Channels) Error(chid datatransfer.ChannelID, err error) error { + return c.send(chid, datatransfer.Error, err) +} + +// Disconnected indicates that the connection went down and it was not possible +// to restart it +func (c *Channels) Disconnected(chid datatransfer.ChannelID, err error) error { + return c.send(chid, datatransfer.Disconnected, err) +} + +// RequestCancelled indicates that a transport layer request was cancelled by the +// request opener +func (c *Channels) RequestCancelled(chid datatransfer.ChannelID, err error) error { + return c.send(chid, datatransfer.RequestCancelled, err) +} + +// SendDataError indicates that the transport layer had an error trying +// to send data to the remote peer +func (c *Channels) SendDataError(chid datatransfer.ChannelID, err error) error { + return c.send(chid, datatransfer.SendDataError, err) +} + +// ReceiveDataError indicates that the transport layer had an error receiving +// data from the remote peer +func (c *Channels) ReceiveDataError(chid datatransfer.ChannelID, err error) error { + return c.send(chid, datatransfer.ReceiveDataError, err) +} + +// HasChannel returns true if the given channel id is being tracked +func (c *Channels) HasChannel(chid datatransfer.ChannelID) (bool, error) { + return c.stateMachines.Has(chid) +} + +// fireProgressEvent fires 
+// - an event for queuing / sending / receiving blocks
+// - a corresponding "progress" event if the block has not been seen before
+// For example, if a block is being sent for the first time, the method will
+// fire both DataSent AND DataSentProgress.
+// If a block is resent, the method will fire DataSent but not DataSentProgress.
+// Returns true if the block is new (both the event and a progress event were fired).
+func (c *Channels) fireProgressEvent(chid datatransfer.ChannelID, evt datatransfer.EventCode, progressEvt datatransfer.EventCode, k cid.Cid, delta uint64, index int64, unique bool, readFromOriginal readOriginalFn) (bool, error) {
+	if err := c.checkChannelExists(chid, evt); err != nil {
+		return false, err
+	}
+
+	isNewIndex, err := c.blockIndexCache.updateIfGreater(evt, chid, index, readFromOriginal)
+	if err != nil {
+		return false, err
+	}
+
+	// If the block has not been seen before, fire the progress event
+	if unique && isNewIndex {
+		if err := c.stateMachines.Send(chid, progressEvt, delta); err != nil {
+			return false, err
+		}
+	}
+
+	// Fire the regular event
+	return unique && isNewIndex, c.stateMachines.Send(chid, evt, index)
+}
+
+func (c *Channels) send(chid datatransfer.ChannelID, code datatransfer.EventCode, args ...interface{}) error {
+	err := c.checkChannelExists(chid, code)
+	if err != nil {
+		return err
+	}
+	log.Debugw("send data transfer event", "name", datatransfer.Events[code], "transfer ID", chid.ID, "args", args)
+	return c.stateMachines.Send(chid, code, args...)
+}
+
+func (c *Channels) checkChannelExists(chid datatransfer.ChannelID, code datatransfer.EventCode) error {
+	has, err := c.stateMachines.Has(chid)
+	if err != nil {
+		return err
+	}
+	if !has {
+		return xerrors.Errorf("cannot send FSM event %s to data-transfer channel %s: %w",
+			datatransfer.Events[code], chid, NewErrNotFound(chid))
+	}
+	return nil
+}
+
+// Convert from the internally used channel state format to the externally exposed ChannelState
+func (c *Channels) fromInternalChannelState(ch internal.ChannelState) datatransfer.ChannelState {
+	return fromInternalChannelState(ch, c.voucherDecoder, c.voucherResultDecoder)
+}
diff --git a/datatransfer/channels/channels_fsm.go b/datatransfer/channels/channels_fsm.go
new file mode 100644
index 000000000..2b8111e59
--- /dev/null
+++ b/datatransfer/channels/channels_fsm.go
@@ -0,0 +1,298 @@
+package channels
+
+import (
+	logging "github.com/ipfs/go-log/v2"
+	cbg "github.com/whyrusleeping/cbor-gen"
+
+	"github.com/filecoin-project/go-statemachine/fsm"
+
+	"github.com/filecoin-project/boost/datatransfer"
+	"github.com/filecoin-project/boost/datatransfer/channels/internal"
+)
+
+var log = logging.Logger("data-transfer")
+
+var transferringStates = []fsm.StateKey{
+	datatransfer.Requested,
+	datatransfer.Ongoing,
+	datatransfer.InitiatorPaused,
+	datatransfer.ResponderPaused,
+	datatransfer.BothPaused,
+	datatransfer.ResponderCompleted,
+	datatransfer.ResponderFinalizing,
+}
+
+// ChannelEvents describes the events that can occur on a data transfer channel
+// and the state transitions they cause
+var ChannelEvents = fsm.Events{
+	// Open a channel
+	fsm.Event(datatransfer.Open).FromAny().To(datatransfer.Requested).Action(func(chst *internal.ChannelState) error {
+		chst.AddLog("")
+		return nil
+	}),
+	// Remote peer has accepted the Open channel request
+	fsm.Event(datatransfer.Accept).From(datatransfer.Requested).To(datatransfer.Ongoing).Action(func(chst *internal.ChannelState) error {
+		chst.AddLog("")
+		return nil
+	}),
+
+	fsm.Event(datatransfer.TransferRequestQueued).FromAny().ToJustRecord().Action(func(chst
*internal.ChannelState) error { + chst.Message = "" + chst.AddLog("") + return nil + }), + + fsm.Event(datatransfer.Restart).FromAny().ToJustRecord().Action(func(chst *internal.ChannelState) error { + chst.Message = "" + chst.AddLog("") + return nil + }), + fsm.Event(datatransfer.Cancel).FromAny().To(datatransfer.Cancelling).Action(func(chst *internal.ChannelState) error { + chst.AddLog("") + return nil + }), + + // When a channel is Opened, clear any previous error message. + // (eg if the channel is opened after being restarted due to a connection + // error) + fsm.Event(datatransfer.Opened).FromAny().ToJustRecord().Action(func(chst *internal.ChannelState) error { + chst.Message = "" + chst.AddLog("") + return nil + }), + + fsm.Event(datatransfer.DataReceived).FromAny().ToNoChange(). + Action(func(chst *internal.ChannelState, rcvdBlocksTotal int64) error { + if rcvdBlocksTotal > chst.ReceivedBlocksTotal { + chst.ReceivedBlocksTotal = rcvdBlocksTotal + } + chst.AddLog("") + return nil + }), + fsm.Event(datatransfer.DataReceivedProgress).FromMany(transferringStates...).ToNoChange(). + Action(func(chst *internal.ChannelState, delta uint64) error { + chst.Received += delta + chst.AddLog("received data") + return nil + }), + + fsm.Event(datatransfer.DataSent). + FromMany(transferringStates...).ToNoChange(). + From(datatransfer.TransferFinished).ToNoChange(). + Action(func(chst *internal.ChannelState, sentBlocksTotal int64) error { + if sentBlocksTotal > chst.SentBlocksTotal { + chst.SentBlocksTotal = sentBlocksTotal + } + chst.AddLog("") + return nil + }), + + fsm.Event(datatransfer.DataSentProgress).FromMany(transferringStates...).ToNoChange(). + Action(func(chst *internal.ChannelState, delta uint64) error { + chst.Sent += delta + chst.AddLog("sending data") + return nil + }), + + fsm.Event(datatransfer.DataQueued). + FromMany(transferringStates...).ToNoChange(). + From(datatransfer.TransferFinished).ToNoChange(). + Action(func(chst *internal.ChannelState, queuedBlocksTotal int64) error { + if queuedBlocksTotal > chst.QueuedBlocksTotal { + chst.QueuedBlocksTotal = queuedBlocksTotal + } + chst.AddLog("") + return nil + }), + fsm.Event(datatransfer.DataQueuedProgress).FromMany(transferringStates...).ToNoChange(). 
+ Action(func(chst *internal.ChannelState, delta uint64) error { + chst.Queued += delta + chst.AddLog("") + return nil + }), + fsm.Event(datatransfer.Disconnected).FromAny().ToNoChange().Action(func(chst *internal.ChannelState, err error) error { + chst.Message = err.Error() + chst.AddLog("data transfer disconnected: %s", chst.Message) + return nil + }), + fsm.Event(datatransfer.SendDataError).FromAny().ToNoChange().Action(func(chst *internal.ChannelState, err error) error { + chst.Message = err.Error() + chst.AddLog("data transfer send error: %s", chst.Message) + return nil + }), + fsm.Event(datatransfer.ReceiveDataError).FromAny().ToNoChange().Action(func(chst *internal.ChannelState, err error) error { + chst.Message = err.Error() + chst.AddLog("data transfer receive error: %s", chst.Message) + return nil + }), + fsm.Event(datatransfer.RequestCancelled).FromAny().ToNoChange().Action(func(chst *internal.ChannelState, err error) error { + chst.Message = err.Error() + chst.AddLog("data transfer request cancelled: %s", chst.Message) + return nil + }), + fsm.Event(datatransfer.Error).FromAny().To(datatransfer.Failing).Action(func(chst *internal.ChannelState, err error) error { + chst.Message = err.Error() + chst.AddLog("data transfer erred: %s", chst.Message) + return nil + }), + + fsm.Event(datatransfer.NewVoucher).FromAny().ToNoChange(). + Action(func(chst *internal.ChannelState, vtype datatransfer.TypeIdentifier, voucherBytes []byte) error { + chst.Vouchers = append(chst.Vouchers, internal.EncodedVoucher{Type: vtype, Voucher: &cbg.Deferred{Raw: voucherBytes}}) + chst.AddLog("got new voucher") + return nil + }), + fsm.Event(datatransfer.NewVoucherResult).FromAny().ToNoChange(). + Action(func(chst *internal.ChannelState, vtype datatransfer.TypeIdentifier, voucherResultBytes []byte) error { + chst.VoucherResults = append(chst.VoucherResults, + internal.EncodedVoucherResult{Type: vtype, VoucherResult: &cbg.Deferred{Raw: voucherResultBytes}}) + chst.AddLog("got new voucher result") + return nil + }), + + fsm.Event(datatransfer.PauseInitiator). + FromMany(datatransfer.Requested, datatransfer.Ongoing).To(datatransfer.InitiatorPaused). + From(datatransfer.ResponderPaused).To(datatransfer.BothPaused). + FromAny().ToJustRecord().Action(func(chst *internal.ChannelState) error { + chst.AddLog("") + return nil + }), + + fsm.Event(datatransfer.PauseResponder). + FromMany(datatransfer.Requested, datatransfer.Ongoing).To(datatransfer.ResponderPaused). + From(datatransfer.InitiatorPaused).To(datatransfer.BothPaused). + FromAny().ToJustRecord().Action(func(chst *internal.ChannelState) error { + chst.AddLog("") + return nil + }), + + fsm.Event(datatransfer.ResumeInitiator). + From(datatransfer.InitiatorPaused).To(datatransfer.Ongoing). + From(datatransfer.BothPaused).To(datatransfer.ResponderPaused). + FromAny().ToJustRecord().Action(func(chst *internal.ChannelState) error { + chst.AddLog("") + return nil + }), + + fsm.Event(datatransfer.ResumeResponder). + From(datatransfer.ResponderPaused).To(datatransfer.Ongoing). + From(datatransfer.BothPaused).To(datatransfer.InitiatorPaused). + From(datatransfer.Finalizing).To(datatransfer.Completing). + FromAny().ToJustRecord().Action(func(chst *internal.ChannelState) error { + chst.AddLog("") + return nil + }), + + // The transfer has finished on the local node - all data was sent / received + fsm.Event(datatransfer.FinishTransfer). + FromAny().To(datatransfer.TransferFinished). + FromMany(datatransfer.Failing, datatransfer.Cancelling).ToJustRecord(). 
+		From(datatransfer.ResponderCompleted).To(datatransfer.Completing).
+		From(datatransfer.ResponderFinalizing).To(datatransfer.ResponderFinalizingTransferFinished).
+		// If we are in the requested state, it means the other party simply never responded to
+		// our data transfer, or we never actually contacted them. In any case, it's safe to skip
+		// the finalization process and complete the transfer
+		From(datatransfer.Requested).To(datatransfer.Completing).
+		Action(func(chst *internal.ChannelState) error {
+			chst.AddLog("")
+			return nil
+		}),
+
+	fsm.Event(datatransfer.ResponderBeginsFinalization).
+		FromAny().To(datatransfer.ResponderFinalizing).
+		FromMany(datatransfer.Failing, datatransfer.Cancelling).ToJustRecord().
+		From(datatransfer.TransferFinished).To(datatransfer.ResponderFinalizingTransferFinished).Action(func(chst *internal.ChannelState) error {
+			chst.AddLog("")
+			return nil
+		}),
+
+	// The remote peer sent a Complete message, meaning it has sent / received all data
+	fsm.Event(datatransfer.ResponderCompletes).
+		FromAny().To(datatransfer.ResponderCompleted).
+		FromMany(datatransfer.Failing, datatransfer.Cancelling).ToJustRecord().
+		From(datatransfer.ResponderPaused).To(datatransfer.ResponderFinalizing).
+		From(datatransfer.TransferFinished).To(datatransfer.Completing).
+		From(datatransfer.ResponderFinalizing).To(datatransfer.ResponderCompleted).
+		From(datatransfer.ResponderFinalizingTransferFinished).To(datatransfer.Completing).Action(func(chst *internal.ChannelState) error {
+			chst.AddLog("")
+			return nil
+		}),
+
+	fsm.Event(datatransfer.BeginFinalizing).FromAny().To(datatransfer.Finalizing).Action(func(chst *internal.ChannelState) error {
+		chst.AddLog("")
+		return nil
+	}),
+
+	// Both the local node and the remote peer have completed the transfer
+	fsm.Event(datatransfer.Complete).FromAny().To(datatransfer.Completing).Action(func(chst *internal.ChannelState) error {
+		chst.AddLog("")
+		return nil
+	}),
+
+	fsm.Event(datatransfer.CleanupComplete).
+		From(datatransfer.Cancelling).To(datatransfer.Cancelled).
+		From(datatransfer.Failing).To(datatransfer.Failed).
+		From(datatransfer.Completing).To(datatransfer.Completed).Action(func(chst *internal.ChannelState) error {
+			chst.AddLog("")
+			return nil
+		}),
+
+	// will kick off state handlers for channels that were cleaning up
+	fsm.Event(datatransfer.CompleteCleanupOnRestart).FromAny().ToNoChange().Action(func(chst *internal.ChannelState) error {
+		chst.AddLog("")
+		return nil
+	}),
+}
+
+// ChannelStateEntryFuncs are handlers called as we enter different states
+// (currently unused for this fsm)
+var ChannelStateEntryFuncs = fsm.StateEntryFuncs{
+	datatransfer.Cancelling: cleanupConnection,
+	datatransfer.Failing: cleanupConnection,
+	datatransfer.Completing: cleanupConnection,
+}
+
+func cleanupConnection(ctx fsm.Context, env ChannelEnvironment, channel internal.ChannelState) error {
+	otherParty := channel.Initiator
+	if otherParty == env.ID() {
+		otherParty = channel.Responder
+	}
+	env.CleanupChannel(datatransfer.ChannelID{ID: channel.TransferID, Initiator: channel.Initiator, Responder: channel.Responder})
+	env.Unprotect(otherParty, datatransfer.ChannelID{ID: channel.TransferID, Initiator: channel.Initiator, Responder: channel.Responder}.String())
+	return ctx.Trigger(datatransfer.CleanupComplete)
+}
+
+// CleanupStates are the penultimate states for a channel
+var CleanupStates = []fsm.StateKey{
+	datatransfer.Cancelling,
+	datatransfer.Completing,
+	datatransfer.Failing,
+}
+
+// ChannelFinalityStates are the final states for a channel
+var ChannelFinalityStates = []fsm.StateKey{
+	datatransfer.Cancelled,
+	datatransfer.Completed,
+	datatransfer.Failed,
+}
+
+// IsChannelTerminated returns true if the channel is in a finality state
+func IsChannelTerminated(st datatransfer.Status) bool {
+	for _, s := range ChannelFinalityStates {
+		if s == st {
+			return true
+		}
+	}
+
+	return false
+}
+
+// IsChannelCleaningUp returns true if the channel is in a cleanup state
+func IsChannelCleaningUp(st datatransfer.Status) bool {
+	for _, s := range CleanupStates {
+		if s == st {
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/datatransfer/channels/internal/internalchannel.go b/datatransfer/channels/internal/internalchannel.go
new file mode 100644
index 000000000..f112d92a3
--- /dev/null
+++ b/datatransfer/channels/internal/internalchannel.go
@@ -0,0 +1,90 @@
+package internal
+
+import (
+	"fmt"
+
+	"github.com/ipfs/go-cid"
+	"github.com/libp2p/go-libp2p/core/peer"
+	cbg "github.com/whyrusleeping/cbor-gen"
+
+	"github.com/filecoin-project/boost/datatransfer"
+)
+
+//go:generate cbor-gen-for --map-encoding ChannelState EncodedVoucher EncodedVoucherResult
+
+// EncodedVoucher is how the voucher is stored on disk
+type EncodedVoucher struct {
+	// Vouchers identifier for decoding
+	Type datatransfer.TypeIdentifier
+	// used to verify this channel
+	Voucher *cbg.Deferred
+}
+
+// EncodedVoucherResult is how the voucher result is stored on disk
+type EncodedVoucherResult struct {
+	// Vouchers identifier for decoding
+	Type datatransfer.TypeIdentifier
+	// used to verify this channel
+	VoucherResult *cbg.Deferred
+}
+
+// ChannelState is the internal representation on disk for the channel fsm
+type ChannelState struct {
+	// PeerId of the manager peer
+	SelfPeer peer.ID
+	// an identifier for this channel shared by request and responder, set by requester through protocol
+	TransferID datatransfer.TransferID
+	// Initiator is the person who initiated this datatransfer request
+	Initiator peer.ID
+	// Responder is the person who is responding to this datatransfer request
+	Responder peer.ID
+	// base CID for the piece being transferred
+	BaseCid cid.Cid
+	// portion of Piece to return, specified by an IPLD selector
+	Selector *cbg.Deferred
+	// the party that is sending the data (not who initiated the request)
+	Sender peer.ID
+	// the party that is receiving the data (not who initiated the request)
+	Recipient peer.ID
+	// expected amount of data to be transferred
+	TotalSize uint64
+	// current status of this deal
+	Status datatransfer.Status
+	// total bytes read from this node and queued for sending (0 if receiver)
+	Queued uint64
+	// total bytes sent from this node (0 if receiver)
+	Sent uint64
+	// total bytes received by this node (0 if sender)
+	Received uint64
+	// more informative status on a channel
+	Message string
+	Vouchers []EncodedVoucher
+	VoucherResults []EncodedVoucherResult
+	// Number of blocks that have been received, including blocks that are
+	// present in more than one place in the DAG
+	ReceivedBlocksTotal int64
+	// Number of blocks that have been queued, including blocks that are
+	// present in more than one place in the DAG
+	QueuedBlocksTotal int64
+	// Number of blocks that have been sent, including blocks that are
+	// present in more than one place in the DAG
+	SentBlocksTotal int64
+	// Stages traces the execution of a data transfer.
+	//
+	// EXPERIMENTAL; subject to change.
+	Stages *datatransfer.ChannelStages
+}
+
+// AddLog takes an fmt string with arguments, and adds the formatted string to
+// the logs for the current deal stage.
+//
+// EXPERIMENTAL; subject to change.
+func (cs *ChannelState) AddLog(msg string, a ...interface{}) {
+	if len(a) > 0 {
+		msg = fmt.Sprintf(msg, a...)
+	}
+
+	stage := datatransfer.Statuses[cs.Status]
+
+	cs.Stages.AddLog(stage, msg)
+}
diff --git a/datatransfer/channels/internal/internalchannel_cbor_gen.go b/datatransfer/channels/internal/internalchannel_cbor_gen.go
new file mode 100644
index 000000000..81c4ca710
--- /dev/null
+++ b/datatransfer/channels/internal/internalchannel_cbor_gen.go
@@ -0,0 +1,1043 @@
+// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT.
+ +package internal + +import ( + "fmt" + "io" + "sort" + + datatransfer "github.com/filecoin-project/boost/datatransfer" + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p/core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = sort.Sort + +func (t *ChannelState) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{180}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.SelfPeer (peer.ID) (string) + if len("SelfPeer") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SelfPeer\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("SelfPeer"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("SelfPeer")); err != nil { + return err + } + + if len(t.SelfPeer) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.SelfPeer was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.SelfPeer))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.SelfPeer)); err != nil { + return err + } + + // t.TransferID (datatransfer.TransferID) (uint64) + if len("TransferID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TransferID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("TransferID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("TransferID")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TransferID)); err != nil { + return err + } + + // t.Initiator (peer.ID) (string) + if len("Initiator") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Initiator\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Initiator"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Initiator")); err != nil { + return err + } + + if len(t.Initiator) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Initiator was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Initiator))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Initiator)); err != nil { + return err + } + + // t.Responder (peer.ID) (string) + if len("Responder") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Responder\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Responder"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Responder")); err != nil { + return err + } + + if len(t.Responder) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Responder was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Responder))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Responder)); err != nil { + return err + } + + // t.BaseCid (cid.Cid) (struct) + if len("BaseCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"BaseCid\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("BaseCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("BaseCid")); err != nil { + return err + } + + if err := cbg.WriteCidBuf(scratch, w, 
t.BaseCid); err != nil { + return xerrors.Errorf("failed to write cid field t.BaseCid: %w", err) + } + + // t.Selector (typegen.Deferred) (struct) + if len("Selector") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Selector\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Selector"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Selector")); err != nil { + return err + } + + if err := t.Selector.MarshalCBOR(w); err != nil { + return err + } + + // t.Sender (peer.ID) (string) + if len("Sender") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Sender\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Sender"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Sender")); err != nil { + return err + } + + if len(t.Sender) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Sender was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Sender))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Sender)); err != nil { + return err + } + + // t.Recipient (peer.ID) (string) + if len("Recipient") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Recipient\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Recipient"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Recipient")); err != nil { + return err + } + + if len(t.Recipient) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Recipient was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Recipient))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Recipient)); err != nil { + return err + } + + // t.TotalSize (uint64) (uint64) + if len("TotalSize") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TotalSize\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("TotalSize"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("TotalSize")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TotalSize)); err != nil { + return err + } + + // t.Status (datatransfer.Status) (uint64) + if len("Status") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Status\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Status"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Status")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.Queued (uint64) (uint64) + if len("Queued") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Queued\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Queued"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Queued")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Queued)); err != nil { + return err + } + + // t.Sent (uint64) (uint64) + if len("Sent") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Sent\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, 
cbg.MajTextString, uint64(len("Sent"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Sent")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Sent)); err != nil { + return err + } + + // t.Received (uint64) (uint64) + if len("Received") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Received\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Received"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Received")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Received)); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.Vouchers ([]internal.EncodedVoucher) (slice) + if len("Vouchers") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Vouchers\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Vouchers"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Vouchers")); err != nil { + return err + } + + if len(t.Vouchers) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Vouchers was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Vouchers))); err != nil { + return err + } + for _, v := range t.Vouchers { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + + // t.VoucherResults ([]internal.EncodedVoucherResult) (slice) + if len("VoucherResults") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"VoucherResults\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("VoucherResults"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("VoucherResults")); err != nil { + return err + } + + if len(t.VoucherResults) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.VoucherResults was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.VoucherResults))); err != nil { + return err + } + for _, v := range t.VoucherResults { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + + // t.ReceivedBlocksTotal (int64) (int64) + if len("ReceivedBlocksTotal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ReceivedBlocksTotal\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ReceivedBlocksTotal"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ReceivedBlocksTotal")); err != nil { + return err + } + + if t.ReceivedBlocksTotal >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ReceivedBlocksTotal)); err != nil { + return err + } + } else { + if err := 
cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.ReceivedBlocksTotal-1)); err != nil { + return err + } + } + + // t.QueuedBlocksTotal (int64) (int64) + if len("QueuedBlocksTotal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"QueuedBlocksTotal\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("QueuedBlocksTotal"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("QueuedBlocksTotal")); err != nil { + return err + } + + if t.QueuedBlocksTotal >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.QueuedBlocksTotal)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.QueuedBlocksTotal-1)); err != nil { + return err + } + } + + // t.SentBlocksTotal (int64) (int64) + if len("SentBlocksTotal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SentBlocksTotal\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("SentBlocksTotal"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("SentBlocksTotal")); err != nil { + return err + } + + if t.SentBlocksTotal >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SentBlocksTotal)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SentBlocksTotal-1)); err != nil { + return err + } + } + + // t.Stages (datatransfer.ChannelStages) (struct) + if len("Stages") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Stages\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Stages"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Stages")); err != nil { + return err + } + + if err := t.Stages.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *ChannelState) UnmarshalCBOR(r io.Reader) error { + *t = ChannelState{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("ChannelState: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.SelfPeer (peer.ID) (string) + case "SelfPeer": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.SelfPeer = peer.ID(sval) + } + // t.TransferID (datatransfer.TransferID) (uint64) + case "TransferID": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.TransferID = datatransfer.TransferID(extra) + + } + // t.Initiator (peer.ID) (string) + case "Initiator": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Initiator = peer.ID(sval) + } + // t.Responder (peer.ID) (string) + case "Responder": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Responder = peer.ID(sval) + } + // t.BaseCid (cid.Cid) (struct) + case "BaseCid": + + { + + c, err := 
cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.BaseCid: %w", err) + } + + t.BaseCid = c + + } + // t.Selector (typegen.Deferred) (struct) + case "Selector": + + { + + t.Selector = new(cbg.Deferred) + + if err := t.Selector.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("failed to read deferred field: %w", err) + } + } + // t.Sender (peer.ID) (string) + case "Sender": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Sender = peer.ID(sval) + } + // t.Recipient (peer.ID) (string) + case "Recipient": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Recipient = peer.ID(sval) + } + // t.TotalSize (uint64) (uint64) + case "TotalSize": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.TotalSize = uint64(extra) + + } + // t.Status (datatransfer.Status) (uint64) + case "Status": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = datatransfer.Status(extra) + + } + // t.Queued (uint64) (uint64) + case "Queued": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Queued = uint64(extra) + + } + // t.Sent (uint64) (uint64) + case "Sent": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Sent = uint64(extra) + + } + // t.Received (uint64) (uint64) + case "Received": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Received = uint64(extra) + + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.Vouchers ([]internal.EncodedVoucher) (slice) + case "Vouchers": + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Vouchers: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Vouchers = make([]EncodedVoucher, extra) + } + + for i := 0; i < int(extra); i++ { + + var v EncodedVoucher + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.Vouchers[i] = v + } + + // t.VoucherResults ([]internal.EncodedVoucherResult) (slice) + case "VoucherResults": + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.VoucherResults: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.VoucherResults = make([]EncodedVoucherResult, extra) + } + + for i := 0; i < int(extra); i++ { + + var v EncodedVoucherResult + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.VoucherResults[i] = v + } + + // t.ReceivedBlocksTotal (int64) (int64) + case "ReceivedBlocksTotal": + { + maj, extra, err := cbg.CborReadHeaderBuf(br, 
scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.ReceivedBlocksTotal = int64(extraI) + } + // t.QueuedBlocksTotal (int64) (int64) + case "QueuedBlocksTotal": + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.QueuedBlocksTotal = int64(extraI) + } + // t.SentBlocksTotal (int64) (int64) + case "SentBlocksTotal": + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SentBlocksTotal = int64(extraI) + } + // t.Stages (datatransfer.ChannelStages) (struct) + case "Stages": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.Stages = new(datatransfer.ChannelStages) + if err := t.Stages.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Stages pointer: %w", err) + } + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *EncodedVoucher) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{162}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Type (datatransfer.TypeIdentifier) (string) + if len("Type") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Type\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Type"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Type")); err != nil { + return err + } + + if len(t.Type) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Type was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Type))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Type)); err != nil { + return err + } + + // t.Voucher (typegen.Deferred) (struct) + if len("Voucher") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Voucher\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Voucher"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Voucher")); err != nil { + return err + } + + if err := t.Voucher.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *EncodedVoucher) UnmarshalCBOR(r io.Reader) error { + 
*t = EncodedVoucher{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("EncodedVoucher: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Type (datatransfer.TypeIdentifier) (string) + case "Type": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Type = datatransfer.TypeIdentifier(sval) + } + // t.Voucher (typegen.Deferred) (struct) + case "Voucher": + + { + + t.Voucher = new(cbg.Deferred) + + if err := t.Voucher.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("failed to read deferred field: %w", err) + } + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *EncodedVoucherResult) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{162}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Type (datatransfer.TypeIdentifier) (string) + if len("Type") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Type\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Type"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Type")); err != nil { + return err + } + + if len(t.Type) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Type was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Type))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Type)); err != nil { + return err + } + + // t.VoucherResult (typegen.Deferred) (struct) + if len("VoucherResult") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"VoucherResult\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("VoucherResult"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("VoucherResult")); err != nil { + return err + } + + if err := t.VoucherResult.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *EncodedVoucherResult) UnmarshalCBOR(r io.Reader) error { + *t = EncodedVoucherResult{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("EncodedVoucherResult: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Type (datatransfer.TypeIdentifier) (string) + case "Type": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Type = datatransfer.TypeIdentifier(sval) + } + // t.VoucherResult (typegen.Deferred) (struct) + case "VoucherResult": + + { + + t.VoucherResult = new(cbg.Deferred) + + if err := t.VoucherResult.UnmarshalCBOR(br); err != nil { + 
return xerrors.Errorf("failed to read deferred field: %w", err) + } + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} diff --git a/datatransfer/channels/internal/migrations/migrations.go b/datatransfer/channels/internal/migrations/migrations.go new file mode 100644 index 000000000..1f74b278e --- /dev/null +++ b/datatransfer/channels/internal/migrations/migrations.go @@ -0,0 +1,13 @@ +package migrations + +import ( + "github.com/libp2p/go-libp2p/core/peer" + + versioning "github.com/filecoin-project/go-ds-versioning/pkg" + "github.com/filecoin-project/go-ds-versioning/pkg/versioned" +) + +// GetChannelStateMigrations returns a migration list for the channel states +func GetChannelStateMigrations(selfPeer peer.ID) (versioning.VersionedMigrationList, error) { + return versioned.BuilderList{}.Build() +} diff --git a/datatransfer/encoding/encoding.go b/datatransfer/encoding/encoding.go new file mode 100644 index 000000000..dec7abcd7 --- /dev/null +++ b/datatransfer/encoding/encoding.go @@ -0,0 +1,171 @@ +package encoding + +import ( + "bytes" + "reflect" + + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + "github.com/ipld/go-ipld-prime/datamodel" + "github.com/ipld/go-ipld-prime/node/basicnode" + "github.com/ipld/go-ipld-prime/schema" + cborgen "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" +) + +// Encodable is an object that can be written to CBOR and decoded back +type Encodable interface{} + +// Encode encodes an encodable to CBOR, using the best available path for +// writing to CBOR +func Encode(value Encodable) ([]byte, error) { + if cbgEncodable, ok := value.(cborgen.CBORMarshaler); ok { + buf := new(bytes.Buffer) + err := cbgEncodable.MarshalCBOR(buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil + } + if ipldEncodable, ok := value.(datamodel.Node); ok { + if tn, ok := ipldEncodable.(schema.TypedNode); ok { + ipldEncodable = tn.Representation() + } + buf := &bytes.Buffer{} + err := dagcbor.Encode(ipldEncodable, buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil + } + return cbor.DumpObject(value) +} + +func EncodeToNode(encodable Encodable) (datamodel.Node, error) { + byts, err := Encode(encodable) + if err != nil { + return nil, err + } + na := basicnode.Prototype.Any.NewBuilder() + if err := dagcbor.Decode(na, bytes.NewReader(byts)); err != nil { + return nil, err + } + return na.Build(), nil +} + +// Decoder is CBOR decoder for a given encodable type +type Decoder interface { + DecodeFromCbor([]byte) (Encodable, error) + DecodeFromNode(datamodel.Node) (Encodable, error) +} + +// NewDecoder creates a new Decoder that will decode into new instances of the given +// object type. 
It will use the decoding that is optimal for that type +// It returns error if it's not possible to setup a decoder for this type +func NewDecoder(decodeType Encodable) (Decoder, error) { + // check if type is datamodel.Node, if so, just use style + if ipldDecodable, ok := decodeType.(datamodel.Node); ok { + return &ipldDecoder{ipldDecodable.Prototype()}, nil + } + // check if type is a pointer, as we need that to make new copies + // for cborgen types & regular IPLD types + decodeReflectType := reflect.TypeOf(decodeType) + if decodeReflectType.Kind() != reflect.Ptr { + return nil, xerrors.New("type must be a pointer") + } + // check if type is a cbor-gen type + if _, ok := decodeType.(cborgen.CBORUnmarshaler); ok { + return &cbgDecoder{decodeReflectType}, nil + } + // type does is neither ipld-prime nor cbor-gen, so we need to see if it + // can rountrip with oldschool ipld-format + encoded, err := cbor.DumpObject(decodeType) + if err != nil { + return nil, xerrors.New("Object type did not encode") + } + newDecodable := reflect.New(decodeReflectType.Elem()).Interface() + if err := cbor.DecodeInto(encoded, newDecodable); err != nil { + return nil, xerrors.New("Object type did not decode") + } + return &defaultDecoder{decodeReflectType}, nil +} + +type ipldDecoder struct { + style ipld.NodePrototype +} + +func (decoder *ipldDecoder) DecodeFromCbor(encoded []byte) (Encodable, error) { + builder := decoder.style.NewBuilder() + buf := bytes.NewReader(encoded) + err := dagcbor.Decode(builder, buf) + if err != nil { + return nil, err + } + return builder.Build(), nil +} + +func (decoder *ipldDecoder) DecodeFromNode(node datamodel.Node) (Encodable, error) { + builder := decoder.style.NewBuilder() + if err := builder.AssignNode(node); err != nil { + return nil, err + } + return builder.Build(), nil +} + +type cbgDecoder struct { + cbgType reflect.Type +} + +func (decoder *cbgDecoder) DecodeFromCbor(encoded []byte) (Encodable, error) { + decodedValue := reflect.New(decoder.cbgType.Elem()) + decoded, ok := decodedValue.Interface().(cborgen.CBORUnmarshaler) + if !ok || reflect.ValueOf(decoded).IsNil() { + return nil, xerrors.New("problem instantiating decoded value") + } + buf := bytes.NewReader(encoded) + err := decoded.UnmarshalCBOR(buf) + if err != nil { + return nil, err + } + return decoded, nil +} + +func (decoder *cbgDecoder) DecodeFromNode(node datamodel.Node) (Encodable, error) { + if tn, ok := node.(schema.TypedNode); ok { + node = tn.Representation() + } + buf := &bytes.Buffer{} + if err := dagcbor.Encode(node, buf); err != nil { + return nil, err + } + return decoder.DecodeFromCbor(buf.Bytes()) +} + +type defaultDecoder struct { + ptrType reflect.Type +} + +func (decoder *defaultDecoder) DecodeFromCbor(encoded []byte) (Encodable, error) { + decodedValue := reflect.New(decoder.ptrType.Elem()) + decoded, ok := decodedValue.Interface().(Encodable) + if !ok || reflect.ValueOf(decoded).IsNil() { + return nil, xerrors.New("problem instantiating decoded value") + } + err := cbor.DecodeInto(encoded, decoded) + if err != nil { + return nil, err + } + return decoded, nil +} + +func (decoder *defaultDecoder) DecodeFromNode(node datamodel.Node) (Encodable, error) { + if tn, ok := node.(schema.TypedNode); ok { + node = tn.Representation() + } + buf := &bytes.Buffer{} + if err := dagcbor.Encode(node, buf); err != nil { + return nil, err + } + return decoder.DecodeFromCbor(buf.Bytes()) +} diff --git a/datatransfer/errors.go b/datatransfer/errors.go new file mode 100644 index 000000000..0e9903f6d --- 
/dev/null
+++ b/datatransfer/errors.go
@@ -0,0 +1,32 @@
+package datatransfer
+
+type errorType string
+
+func (e errorType) Error() string {
+	return string(e)
+}
+
+// ErrHandlerAlreadySet means an event handler was already set for this instance of
+// hooks
+const ErrHandlerAlreadySet = errorType("already set event handler")
+
+// ErrHandlerNotSet means you cannot issue commands to this interface because the
+// handler has not been set
+const ErrHandlerNotSet = errorType("event handler has not been set")
+
+// ErrChannelNotFound means the channel this command was issued for does not exist
+const ErrChannelNotFound = errorType("channel not found")
+
+// ErrPause is a special error that the DataReceived / DataSent hooks can
+// use to pause the channel
+const ErrPause = errorType("pause channel")
+
+// ErrResume is a special error that the RequestReceived / ResponseReceived hooks can
+// use to resume the channel
+const ErrResume = errorType("resume channel")
+
+// ErrRejected indicates a request was not accepted
+const ErrRejected = errorType("response rejected")
+
+// ErrUnsupported indicates an operation is not supported by the transport protocol
+const ErrUnsupported = errorType("unsupported")
diff --git a/datatransfer/events.go b/datatransfer/events.go
new file mode 100644
index 000000000..664579c43
--- /dev/null
+++ b/datatransfer/events.go
@@ -0,0 +1,160 @@
+package datatransfer
+
+import "time"
+
+// EventCode is a name for an event that occurs on a data transfer channel
+type EventCode int
+
+const (
+	// Open is an event that occurs when a channel is first opened
+	Open EventCode = iota
+
+	// Accept is an event that emits when the data transfer is first accepted
+	Accept
+
+	// Restart is an event that emits when the data transfer is restarted
+	Restart
+
+	// DataReceived is emitted when data is received on the channel from a remote peer
+	DataReceived
+
+	// DataSent is emitted when data is sent on the channel to the remote peer
+	DataSent
+
+	// Cancel indicates one side has cancelled the transfer
+	Cancel
+
+	// Error is an event that emits when an error occurs in a data transfer
+	Error
+
+	// CleanupComplete emits when a request is cleaned up
+	CleanupComplete
+
+	// NewVoucher means we have a new voucher on this channel
+	NewVoucher
+
+	// NewVoucherResult means we have a new voucher result on this channel
+	NewVoucherResult
+
+	// PauseInitiator emits when the data sender pauses transfer
+	PauseInitiator
+
+	// ResumeInitiator emits when the data sender resumes transfer
+	ResumeInitiator
+
+	// PauseResponder emits when the data receiver pauses transfer
+	PauseResponder
+
+	// ResumeResponder emits when the data receiver resumes transfer
+	ResumeResponder
+
+	// FinishTransfer emits when the initiator has completed sending/receiving data
+	FinishTransfer
+
+	// ResponderCompletes emits when the initiator receives a message that the responder is finished
+	ResponderCompletes
+
+	// ResponderBeginsFinalization emits when the initiator receives a message that the responder is finalizing
+	ResponderBeginsFinalization
+
+	// BeginFinalizing emits when the responder completes its operations but awaits a response from the
+	// initiator
+	BeginFinalizing
+
+	// Disconnected emits when we are not able to connect to the other party
+	Disconnected
+
+	// Complete is emitted when a data transfer is complete
+	Complete
+
+	// CompleteCleanupOnRestart is emitted when a data transfer channel is restarted to signal
+	// that channels that were cleaning up should finish cleanup
+	
CompleteCleanupOnRestart + + // DataQueued is emitted when data is read and queued for sending to the remote peer + DataQueued + + // DataQueuedProgress is emitted when a block is queued for sending to the + // remote peer. It is not emitted when the block is resent. + // It is used to measure progress of how much of the total data has been + // queued. + DataQueuedProgress + + // DataSentProgress is emitted when a block is sent to the remote peer. + // It is not emitted when the block is resent. + // It is used to measure progress of how much of the total data has + // been sent. + DataSentProgress + + // DataReceivedProgress is emitted the first time a block is received from + // the remote peer. It is used to measure progress of how much of the total + // data has been received. + DataReceivedProgress + + // Deprecated in favour of RequestCancelled + RequestTimedOut + + // SendDataError indicates that the transport layer had an error trying + // to send data to the remote peer + SendDataError + + // ReceiveDataError indicates that the transport layer had an error + // receiving data from the remote peer + ReceiveDataError + + // TransferRequestQueued indicates that a new data transfer request has been queued in the transport layer + TransferRequestQueued + + // RequestCancelled indicates that a transport layer request was cancelled by the request opener + RequestCancelled + + // Opened is fired when a request for data is sent from this node to a peer + Opened +) + +// Events are human readable names for data transfer events +var Events = map[EventCode]string{ + Open: "Open", + Accept: "Accept", + Restart: "Restart", + DataReceived: "DataReceived", + DataSent: "DataSent", + Cancel: "Cancel", + Error: "Error", + CleanupComplete: "CleanupComplete", + NewVoucher: "NewVoucher", + NewVoucherResult: "NewVoucherResult", + PauseInitiator: "PauseInitiator", + ResumeInitiator: "ResumeInitiator", + PauseResponder: "PauseResponder", + ResumeResponder: "ResumeResponder", + FinishTransfer: "FinishTransfer", + ResponderCompletes: "ResponderCompletes", + ResponderBeginsFinalization: "ResponderBeginsFinalization", + BeginFinalizing: "BeginFinalizing", + Disconnected: "Disconnected", + Complete: "Complete", + CompleteCleanupOnRestart: "CompleteCleanupOnRestart", + DataQueued: "DataQueued", + DataQueuedProgress: "DataQueuedProgress", + DataSentProgress: "DataSentProgress", + DataReceivedProgress: "DataReceivedProgress", + RequestTimedOut: "RequestTimedOut", + SendDataError: "SendDataError", + ReceiveDataError: "ReceiveDataError", + TransferRequestQueued: "TransferRequestQueued", + RequestCancelled: "RequestCancelled", +} + +// Event is a struct containing information about a data transfer event +type Event struct { + Code EventCode // What type of event it is + Message string // Any clarifying information about the event + Timestamp time.Time // when the event happened +} + +// Subscriber is a callback that is called when events are emitted +type Subscriber func(event Event, channelState ChannelState) + +// Unsubscribe is a function that gets called to unsubscribe from data transfer events +type Unsubscribe func() diff --git a/datatransfer/impl/environment.go b/datatransfer/impl/environment.go new file mode 100644 index 000000000..51b95064a --- /dev/null +++ b/datatransfer/impl/environment.go @@ -0,0 +1,27 @@ +package impl + +import ( + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/filecoin-project/boost/datatransfer" +) + +type channelEnvironment struct { + m *manager +} + +func (ce 
*channelEnvironment) Protect(id peer.ID, tag string) { + ce.m.dataTransferNetwork.Protect(id, tag) +} + +func (ce *channelEnvironment) Unprotect(id peer.ID, tag string) bool { + return ce.m.dataTransferNetwork.Unprotect(id, tag) +} + +func (ce *channelEnvironment) ID() peer.ID { + return ce.m.dataTransferNetwork.ID() +} + +func (ce *channelEnvironment) CleanupChannel(chid datatransfer.ChannelID) { + ce.m.transport.CleanupChannel(chid) +} diff --git a/datatransfer/impl/events.go b/datatransfer/impl/events.go new file mode 100644 index 000000000..f67d4e0c2 --- /dev/null +++ b/datatransfer/impl/events.go @@ -0,0 +1,590 @@ +package impl + +import ( + "context" + "errors" + + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/libp2p/go-libp2p/core/peer" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "golang.org/x/xerrors" + + "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/datatransfer/encoding" + "github.com/filecoin-project/boost/datatransfer/registry" +) + +// OnChannelOpened is called when we send a request for data to the other +// peer on the given channel ID +func (m *manager) OnChannelOpened(chid datatransfer.ChannelID) error { + log.Infof("channel %s: opened", chid) + + // Check if the channel is being tracked + has, err := m.channels.HasChannel(chid) + if err != nil { + return err + } + if !has { + return datatransfer.ErrChannelNotFound + } + + // Fire an event + return m.channels.ChannelOpened(chid) +} + +// OnDataReceived is called when the transport layer reports that it has +// received some data from the sender. +// It fires an event on the channel, updating the sum of received data and +// calls revalidators so they can pause / resume the channel or send a +// message over the transport. 
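+// Blocks that were already received on this channel (for example after a channel restart) do not trigger any further action.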
+func (m *manager) OnDataReceived(chid datatransfer.ChannelID, link ipld.Link, size uint64, index int64, unique bool) error { + ctx, _ := m.spansIndex.SpanForChannel(context.TODO(), chid) + ctx, span := otel.Tracer("data-transfer").Start(ctx, "dataReceived", trace.WithAttributes( + attribute.String("channelID", chid.String()), + attribute.String("link", link.String()), + attribute.Int64("index", index), + attribute.Int64("size", int64(size)), + )) + defer span.End() + + isNew, err := m.channels.DataReceived(chid, link.(cidlink.Link).Cid, size, index, unique) + if err != nil { + return err + } + + // If this block has already been received on the channel, take no further + // action (this can happen when the data-transfer channel is restarted) + if !isNew { + return nil + } + + // If this node initiated the data transfer, there's nothing more to do + if chid.Initiator == m.peerID { + return nil + } + + // Check each revalidator to see if they want to pause / resume, or send + // a message over the transport + var result datatransfer.VoucherResult + var handled bool + _ = m.revalidators.Each(func(_ datatransfer.TypeIdentifier, _ encoding.Decoder, processor registry.Processor) error { + revalidator := processor.(datatransfer.Revalidator) + handled, result, err = revalidator.OnPushDataReceived(chid, size) + if handled { + return errors.New("stop processing") + } + return nil + }) + if err != nil || result != nil { + msg, err := m.processRevalidationResult(chid, result, err) + if msg != nil { + ctx, _ := m.spansIndex.SpanForChannel(context.TODO(), chid) + if err := m.dataTransferNetwork.SendMessage(ctx, chid.Initiator, msg); err != nil { + return err + } + } + return err + } + + return nil +} + +// OnDataQueued is called when the transport layer reports that it has queued +// up some data to be sent to the requester. +// It fires an event on the channel, updating the sum of queued data and calls +// revalidators so they can pause / resume or send a message over the transport. +func (m *manager) OnDataQueued(chid datatransfer.ChannelID, link ipld.Link, size uint64, index int64, unique bool) (datatransfer.Message, error) { + // The transport layer reports that some data has been queued up to be sent + // to the requester, so fire a DataQueued event on the channels state + // machine. + + ctx, _ := m.spansIndex.SpanForChannel(context.TODO(), chid) + ctx, span := otel.Tracer("data-transfer").Start(ctx, "dataQueued", trace.WithAttributes( + attribute.String("channelID", chid.String()), + attribute.String("link", link.String()), + attribute.Int64("size", int64(size)), + )) + defer span.End() + + isNew, err := m.channels.DataQueued(chid, link.(cidlink.Link).Cid, size, index, unique) + if err != nil { + return nil, err + } + + // If this block has already been queued on the channel, take no further + // action (this can happen when the data-transfer channel is restarted) + if !isNew { + return nil, nil + } + + // If this node initiated the data transfer, there's nothing more to do + if chid.Initiator == m.peerID { + return nil, nil + } + + // Check each revalidator to see if they want to pause / resume, or send + // a message over the transport. + // For example if the data-sender is waiting for the receiver to pay for + // data they may pause the data-transfer. 
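+	// The first revalidator that reports it has handled the event stops the iteration, and any result is turned into a response message.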
+ var result datatransfer.VoucherResult + var handled bool + _ = m.revalidators.Each(func(_ datatransfer.TypeIdentifier, _ encoding.Decoder, processor registry.Processor) error { + revalidator := processor.(datatransfer.Revalidator) + handled, result, err = revalidator.OnPullDataSent(chid, size) + if handled { + return errors.New("stop processing") + } + return nil + }) + if err != nil || result != nil { + return m.processRevalidationResult(chid, result, err) + } + + return nil, nil +} + +func (m *manager) OnDataSent(chid datatransfer.ChannelID, link ipld.Link, size uint64, index int64, unique bool) error { + + ctx, _ := m.spansIndex.SpanForChannel(context.TODO(), chid) + ctx, span := otel.Tracer("data-transfer").Start(ctx, "dataSent", trace.WithAttributes( + attribute.String("channelID", chid.String()), + attribute.String("link", link.String()), + attribute.Int64("size", int64(size)), + )) + defer span.End() + + _, err := m.channels.DataSent(chid, link.(cidlink.Link).Cid, size, index, unique) + return err +} + +func (m *manager) OnRequestReceived(chid datatransfer.ChannelID, request datatransfer.Request) (datatransfer.Response, error) { + if request.IsRestart() { + return m.receiveRestartRequest(chid, request) + } + + if request.IsNew() { + return m.receiveNewRequest(chid, request) + } + if request.IsCancel() { + log.Infof("channel %s: received cancel request, cleaning up channel", chid) + + m.transport.CleanupChannel(chid) + return nil, m.channels.Cancel(chid) + } + if request.IsVoucher() { + return m.processUpdateVoucher(chid, request) + } + if request.IsPaused() { + return nil, m.pauseOther(chid) + } + err := m.resumeOther(chid) + if err != nil { + return nil, err + } + chst, err := m.channels.GetByID(context.TODO(), chid) + if err != nil { + return nil, err + } + if chst.Status() == datatransfer.ResponderPaused || + chst.Status() == datatransfer.ResponderFinalizing { + return nil, datatransfer.ErrPause + } + return nil, nil +} + +func (m *manager) OnTransferQueued(chid datatransfer.ChannelID) { + m.channels.TransferRequestQueued(chid) +} + +func (m *manager) OnResponseReceived(chid datatransfer.ChannelID, response datatransfer.Response) error { + if response.IsComplete() { + log.Infow("received complete response", "chid", chid, "isAccepted", response.Accepted()) + } + + if response.IsCancel() { + log.Infof("channel %s: received cancel response, cancelling channel", chid) + return m.channels.Cancel(chid) + } + if response.IsVoucherResult() { + if !response.EmptyVoucherResult() { + vresult, err := m.decodeVoucherResult(response) + if err != nil { + return err + } + err = m.channels.NewVoucherResult(chid, vresult) + if err != nil { + return err + } + } + if !response.Accepted() { + log.Infof("channel %s: received rejected response, erroring out channel", chid) + return m.channels.Error(chid, datatransfer.ErrRejected) + } + if response.IsNew() { + log.Infof("channel %s: received new response, accepting channel", chid) + err := m.channels.Accept(chid) + if err != nil { + return err + } + } + + if response.IsRestart() { + log.Infof("channel %s: received restart response, restarting channel", chid) + err := m.channels.Restart(chid) + if err != nil { + return err + } + } + } + if response.IsComplete() && response.Accepted() { + if !response.IsPaused() { + log.Infow("received complete response,responder not paused, completing channel", "chid", chid) + return m.channels.ResponderCompletes(chid) + } + + log.Infow("received complete response, responder is paused, not completing channel", "chid", 
chid) + err := m.channels.ResponderBeginsFinalization(chid) + if err != nil { + return nil + } + } + if response.IsPaused() { + return m.pauseOther(chid) + } + return m.resumeOther(chid) +} + +func (m *manager) OnRequestCancelled(chid datatransfer.ChannelID, err error) error { + log.Warnf("channel %+v was cancelled: %s", chid, err) + return m.channels.RequestCancelled(chid, err) +} + +func (m *manager) OnRequestDisconnected(chid datatransfer.ChannelID, err error) error { + log.Warnf("channel %+v has stalled or disconnected: %s", chid, err) + return m.channels.Disconnected(chid, err) +} + +func (m *manager) OnSendDataError(chid datatransfer.ChannelID, err error) error { + log.Debugf("channel %+v had transport send error: %s", chid, err) + return m.channels.SendDataError(chid, err) +} + +func (m *manager) OnReceiveDataError(chid datatransfer.ChannelID, err error) error { + log.Debugf("channel %+v had transport receive error: %s", chid, err) + return m.channels.ReceiveDataError(chid, err) +} + +// OnChannelCompleted is called +// - by the requester when all data for a transfer has been received +// - by the responder when all data for a transfer has been sent +func (m *manager) OnChannelCompleted(chid datatransfer.ChannelID, completeErr error) error { + // If the channel completed successfully + if completeErr == nil { + // If the channel was initiated by the other peer + if chid.Initiator != m.peerID { + log.Infow("received OnChannelCompleted, will send completion message to initiator", "chid", chid) + msg, err := m.completeMessage(chid) + if err != nil { + return err + } + if msg != nil { + // Send the other peer a message that the transfer has completed + log.Infow("sending completion message to initiator", "chid", chid) + ctx, _ := m.spansIndex.SpanForChannel(context.Background(), chid) + if err := m.dataTransferNetwork.SendMessage(ctx, chid.Initiator, msg); err != nil { + err := xerrors.Errorf("channel %s: failed to send completion message to initiator: %w", chid, err) + log.Warnw("failed to send completion message to initiator", "chid", chid, "err", err) + return m.OnRequestDisconnected(chid, err) + } + log.Infow("successfully sent completion message to initiator", "chid", chid) + } + if msg.Accepted() { + if msg.IsPaused() { + return m.channels.BeginFinalizing(chid) + } + return m.channels.Complete(chid) + } + return m.channels.Error(chid, err) + } + + // The channel was initiated by this node, so move to the finished state + log.Infof("channel %s: transfer initiated by local node is complete", chid) + return m.channels.FinishTransfer(chid) + } + + // There was an error so fire an Error event + chst, err := m.channels.GetByID(context.TODO(), chid) + if err != nil { + return err + } + // send an error, but only if we haven't already errored for some reason + if chst.Status() != datatransfer.Failing && chst.Status() != datatransfer.Failed { + err := xerrors.Errorf("data transfer channel %s failed to transfer data: %w", chid, completeErr) + log.Warnf(err.Error()) + return m.channels.Error(chid, err) + } + return nil +} + +func (m *manager) OnContextAugment(chid datatransfer.ChannelID) func(context.Context) context.Context { + return func(ctx context.Context) context.Context { + ctx, _ = m.spansIndex.SpanForChannel(ctx, chid) + return ctx + } +} + +func (m *manager) receiveRestartRequest(chid datatransfer.ChannelID, incoming datatransfer.Request) (datatransfer.Response, error) { + log.Infof("channel %s: received restart request", chid) + + result, err := m.restartRequest(chid, incoming) + 
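// Build a restart response that reflects the validation outcome for the initiator +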
msg, msgErr := m.response(true, false, err, incoming.TransferID(), result) + if msgErr != nil { + return nil, msgErr + } + return msg, err +} + +func (m *manager) receiveNewRequest(chid datatransfer.ChannelID, incoming datatransfer.Request) (datatransfer.Response, error) { + log.Infof("channel %s: received new channel request from %s", chid, chid.Initiator) + + result, err := m.acceptRequest(chid, incoming) + msg, msgErr := m.response(false, true, err, incoming.TransferID(), result) + if msgErr != nil { + return nil, msgErr + } + return msg, err +} + +func (m *manager) restartRequest(chid datatransfer.ChannelID, + incoming datatransfer.Request) (datatransfer.VoucherResult, error) { + + initiator := chid.Initiator + if m.peerID == initiator { + return nil, xerrors.New("initiator cannot be manager peer for a restart request") + } + + if err := m.validateRestartRequest(context.Background(), initiator, chid, incoming); err != nil { + return nil, xerrors.Errorf("restart request for channel %s failed validation: %w", chid, err) + } + + stor, err := incoming.Selector() + if err != nil { + return nil, err + } + + voucher, result, err := m.validateVoucher(true, chid, initiator, incoming, incoming.IsPull(), incoming.BaseCid(), stor) + if err != nil && err != datatransfer.ErrPause { + return result, xerrors.Errorf("failed to validate voucher: %w", err) + } + voucherErr := err + + if result != nil { + err := m.channels.NewVoucherResult(chid, result) + if err != nil { + return result, err + } + } + if err := m.channels.Restart(chid); err != nil { + return result, xerrors.Errorf("failed to restart channel %s: %w", chid, err) + } + processor, has := m.transportConfigurers.Processor(voucher.Type()) + if has { + transportConfigurer := processor.(datatransfer.TransportConfigurer) + transportConfigurer(chid, voucher, m.transport) + } + m.dataTransferNetwork.Protect(initiator, chid.String()) + if voucherErr == datatransfer.ErrPause { + err := m.channels.PauseResponder(chid) + if err != nil { + return result, err + } + } + return result, voucherErr +} + +func (m *manager) acceptRequest(chid datatransfer.ChannelID, incoming datatransfer.Request) (datatransfer.VoucherResult, error) { + + stor, err := incoming.Selector() + if err != nil { + return nil, err + } + + voucher, result, err := m.validateVoucher(false, chid, chid.Initiator, incoming, incoming.IsPull(), incoming.BaseCid(), stor) + if err != nil && err != datatransfer.ErrPause { + return result, err + } + voucherErr := err + + var dataSender, dataReceiver peer.ID + if incoming.IsPull() { + dataSender = m.peerID + dataReceiver = chid.Initiator + } else { + dataSender = chid.Initiator + dataReceiver = m.peerID + } + + log.Infow("data-transfer request validated, will create & start tracking channel", "channelID", chid, "payloadCid", incoming.BaseCid()) + _, err = m.channels.CreateNew(m.peerID, incoming.TransferID(), incoming.BaseCid(), stor, voucher, chid.Initiator, dataSender, dataReceiver) + if err != nil { + log.Errorw("failed to create and start tracking channel", "channelID", chid, "err", err) + return result, err + } + log.Debugw("successfully created and started tracking channel", "channelID", chid) + if result != nil { + err := m.channels.NewVoucherResult(chid, result) + if err != nil { + return result, err + } + } + if err := m.channels.Accept(chid); err != nil { + return result, err + } + processor, has := m.transportConfigurers.Processor(voucher.Type()) + if has { + transportConfigurer := processor.(datatransfer.TransportConfigurer) + 
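+ // Give the voucher type a chance to configure the transport for this channel
+ // (for example, to attach a channel-specific store) before any data flows.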
transportConfigurer(chid, voucher, m.transport) + } + m.dataTransferNetwork.Protect(chid.Initiator, chid.String()) + if voucherErr == datatransfer.ErrPause { + err := m.channels.PauseResponder(chid) + if err != nil { + return result, err + } + } + return result, voucherErr +} + +// validateVoucher converts a voucher in an incoming message to its appropriate +// voucher struct, then runs the validator and returns the results. +// returns error if: +// - reading voucher fails +// - deserialization of selector fails +// - validation fails +func (m *manager) validateVoucher( + isRestart bool, + chid datatransfer.ChannelID, + sender peer.ID, + incoming datatransfer.Request, + isPull bool, + baseCid cid.Cid, + stor ipld.Node, +) (datatransfer.Voucher, datatransfer.VoucherResult, error) { + vouch, err := m.decodeVoucher(incoming, m.validatedTypes) + if err != nil { + return nil, nil, err + } + var validatorFunc func(bool, datatransfer.ChannelID, peer.ID, datatransfer.Voucher, cid.Cid, ipld.Node) (datatransfer.VoucherResult, error) + processor, _ := m.validatedTypes.Processor(vouch.Type()) + validator := processor.(datatransfer.RequestValidator) + if isPull { + validatorFunc = validator.ValidatePull + } else { + validatorFunc = validator.ValidatePush + } + + result, err := validatorFunc(isRestart, chid, sender, vouch, baseCid, stor) + return vouch, result, err +} + +// revalidateVoucher converts a voucher in an incoming message to its appropriate +// voucher struct, then runs the revalidator and returns the results. +// returns error if: +// - reading voucher fails +// - deserialization of selector fails +// - validation fails +func (m *manager) revalidateVoucher(chid datatransfer.ChannelID, + incoming datatransfer.Request) (datatransfer.Voucher, datatransfer.VoucherResult, error) { + vouch, err := m.decodeVoucher(incoming, m.revalidators) + if err != nil { + return nil, nil, err + } + processor, _ := m.revalidators.Processor(vouch.Type()) + validator := processor.(datatransfer.Revalidator) + + result, err := validator.Revalidate(chid, vouch) + return vouch, result, err +} + +func (m *manager) processUpdateVoucher(chid datatransfer.ChannelID, request datatransfer.Request) (datatransfer.Response, error) { + vouch, result, voucherErr := m.revalidateVoucher(chid, request) + if vouch != nil { + err := m.channels.NewVoucher(chid, vouch) + if err != nil { + return nil, err + } + } + return m.processRevalidationResult(chid, result, voucherErr) +} + +func (m *manager) revalidationResponse(chid datatransfer.ChannelID, result datatransfer.VoucherResult, resultErr error) (datatransfer.Response, error) { + chst, err := m.channels.GetByID(context.TODO(), chid) + if err != nil { + return nil, err + } + if chst.Status() == datatransfer.Finalizing { + return m.completeResponse(resultErr, chid.ID, result) + } + return m.response(false, false, resultErr, chid.ID, result) +} + +func (m *manager) processRevalidationResult(chid datatransfer.ChannelID, result datatransfer.VoucherResult, resultErr error) (datatransfer.Response, error) { + vresMessage, err := m.revalidationResponse(chid, result, resultErr) + + if err != nil { + return nil, err + } + if result != nil { + err := m.channels.NewVoucherResult(chid, result) + if err != nil { + return nil, err + } + } + + if resultErr == nil { + return vresMessage, nil + } + + if resultErr == datatransfer.ErrPause { + err := m.pause(chid) + if err != nil { + return nil, err + } + return vresMessage, datatransfer.ErrPause + } + + if resultErr == datatransfer.ErrResume { + err = 
m.resume(chid) + if err != nil { + return nil, err + } + return vresMessage, datatransfer.ErrResume + } + return vresMessage, resultErr +} + +func (m *manager) completeMessage(chid datatransfer.ChannelID) (datatransfer.Response, error) { + var result datatransfer.VoucherResult + var resultErr error + var handled bool + _ = m.revalidators.Each(func(_ datatransfer.TypeIdentifier, _ encoding.Decoder, processor registry.Processor) error { + revalidator := processor.(datatransfer.Revalidator) + handled, result, resultErr = revalidator.OnComplete(chid) + if handled { + return errors.New("stop processing") + } + return nil + }) + if result != nil { + err := m.channels.NewVoucherResult(chid, result) + if err != nil { + return nil, err + } + } + + return m.completeResponse(resultErr, chid.ID, result) +} diff --git a/datatransfer/impl/impl.go b/datatransfer/impl/impl.go new file mode 100644 index 000000000..c58ef8fa1 --- /dev/null +++ b/datatransfer/impl/impl.go @@ -0,0 +1,561 @@ +package impl + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/hannahhoward/go-pubsub" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + logging "github.com/ipfs/go-log/v2" + "github.com/ipld/go-ipld-prime" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/libp2p/go-libp2p/core/peer" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" + "golang.org/x/xerrors" + + "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/datatransfer/channelmonitor" + "github.com/filecoin-project/boost/datatransfer/channels" + "github.com/filecoin-project/boost/datatransfer/encoding" + "github.com/filecoin-project/boost/datatransfer/message" + "github.com/filecoin-project/boost/datatransfer/network" + "github.com/filecoin-project/boost/datatransfer/registry" + "github.com/filecoin-project/boost/datatransfer/tracing" +) + +var log = logging.Logger("dt-impl") +var cancelSendTimeout = 30 * time.Second + +type manager struct { + dataTransferNetwork network.DataTransferNetwork + validatedTypes *registry.Registry + resultTypes *registry.Registry + revalidators *registry.Registry + transportConfigurers *registry.Registry + pubSub *pubsub.PubSub + readySub *pubsub.PubSub + channels *channels.Channels + peerID peer.ID + transport datatransfer.Transport + channelMonitor *channelmonitor.Monitor + channelMonitorCfg *channelmonitor.Config + transferIDGen *timeCounter + spansIndex *tracing.SpansIndex +} + +type internalEvent struct { + evt datatransfer.Event + state datatransfer.ChannelState +} + +func dispatcher(evt pubsub.Event, subscriberFn pubsub.SubscriberFn) error { + ie, ok := evt.(internalEvent) + if !ok { + return errors.New("wrong type of event") + } + cb, ok := subscriberFn.(datatransfer.Subscriber) + if !ok { + return errors.New("wrong type of event") + } + cb(ie.evt, ie.state) + return nil +} + +func readyDispatcher(evt pubsub.Event, fn pubsub.SubscriberFn) error { + migrateErr, ok := evt.(error) + if !ok && evt != nil { + return errors.New("wrong type of event") + } + cb, ok := fn.(datatransfer.ReadyFunc) + if !ok { + return errors.New("wrong type of event") + } + cb(migrateErr) + return nil +} + +// DataTransferOption configures the data transfer manager +type DataTransferOption func(*manager) + +// ChannelRestartConfig sets the configuration options for automatically +// restarting push and pull channels +func ChannelRestartConfig(cfg channelmonitor.Config) DataTransferOption { + 
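+ // A minimal sketch of enabling automatic restarts, assuming the Config field
+ // names defined in the vendored channelmonitor package (ds, dtNet and transport
+ // stand in for the caller's datastore, network and transport):
+ //
+ //	dt, err := NewDataTransfer(ds, dtNet, transport,
+ //		ChannelRestartConfig(channelmonitor.Config{
+ //			AcceptTimeout:          30 * time.Second,
+ //			RestartDebounce:        10 * time.Second,
+ //			RestartBackoff:         20 * time.Second,
+ //			MaxConsecutiveRestarts: 5,
+ //			CompleteTimeout:        30 * time.Second,
+ //		}))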
return func(m *manager) { + m.channelMonitorCfg = &cfg + } +} + +// NewDataTransfer initializes a new instance of a data transfer manager +func NewDataTransfer(ds datastore.Batching, dataTransferNetwork network.DataTransferNetwork, transport datatransfer.Transport, options ...DataTransferOption) (datatransfer.Manager, error) { + m := &manager{ + dataTransferNetwork: dataTransferNetwork, + validatedTypes: registry.NewRegistry(), + resultTypes: registry.NewRegistry(), + revalidators: registry.NewRegistry(), + transportConfigurers: registry.NewRegistry(), + pubSub: pubsub.New(dispatcher), + readySub: pubsub.New(readyDispatcher), + peerID: dataTransferNetwork.ID(), + transport: transport, + transferIDGen: newTimeCounter(), + spansIndex: tracing.NewSpansIndex(), + } + + channels, err := channels.New(ds, m.notifier, m.voucherDecoder, m.resultTypes.Decoder, &channelEnvironment{m}, dataTransferNetwork.ID()) + if err != nil { + return nil, err + } + m.channels = channels + + // Apply config options + for _, option := range options { + option(m) + } + + // Create push / pull channel monitor after applying config options as the config + // options may apply to the monitor + m.channelMonitor = channelmonitor.NewMonitor(m, m.channelMonitorCfg) + + return m, nil +} + +func (m *manager) voucherDecoder(voucherType datatransfer.TypeIdentifier) (encoding.Decoder, bool) { + decoder, has := m.validatedTypes.Decoder(voucherType) + if !has { + return m.revalidators.Decoder(voucherType) + } + return decoder, true +} + +func (m *manager) notifier(evt datatransfer.Event, chst datatransfer.ChannelState) { + err := m.pubSub.Publish(internalEvent{evt, chst}) + if err != nil { + log.Warnf("err publishing DT event: %s", err.Error()) + } +} + +// Start initializes data transfer processing +func (m *manager) Start(ctx context.Context) error { + log.Info("start data-transfer module") + + go func() { + err := m.channels.Start(ctx) + if err != nil { + log.Errorf("Migrating data transfer state machines: %s", err.Error()) + } + err = m.readySub.Publish(err) + if err != nil { + log.Warnf("Publish data transfer ready event: %s", err.Error()) + } + }() + + dtReceiver := &receiver{m} + m.dataTransferNetwork.SetDelegate(dtReceiver) + return m.transport.SetEventHandler(m) +} + +// OnReady registers a listener for when the data transfer manager has finished starting up +func (m *manager) OnReady(ready datatransfer.ReadyFunc) { + m.readySub.Subscribe(ready) +} + +// Stop terminates all data transfers and ends processing +func (m *manager) Stop(ctx context.Context) error { + log.Info("stop data-transfer module") + m.channelMonitor.Shutdown() + m.spansIndex.EndAll() + return m.transport.Shutdown(ctx) +} + +// RegisterVoucherType registers a validator for the given voucher type +// returns error if: +// * voucher type does not implement voucher +// * there is a voucher type registered with an identical identifier +// * voucherType's Kind is not reflect.Ptr +func (m *manager) RegisterVoucherType(voucherType datatransfer.Voucher, validator datatransfer.RequestValidator) error { + err := m.validatedTypes.Register(voucherType, validator) + if err != nil { + return xerrors.Errorf("error registering voucher type: %w", err) + } + return nil +} + +// OpenPushDataChannel opens a data transfer that will send data to the recipient peer and +// transfer parts of the piece that match the selector +func (m *manager) OpenPushDataChannel(ctx context.Context, requestTo peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) 
(datatransfer.ChannelID, error) { + log.Infof("open push channel to %s with base cid %s", requestTo, baseCid) + + req, err := m.newRequest(ctx, selector, false, voucher, baseCid, requestTo) + if err != nil { + return datatransfer.ChannelID{}, err + } + + chid, err := m.channels.CreateNew(m.peerID, req.TransferID(), baseCid, selector, voucher, + m.peerID, m.peerID, requestTo) // initiator = us, sender = us, receiver = them + if err != nil { + return chid, err + } + ctx, span := m.spansIndex.SpanForChannel(ctx, chid) + processor, has := m.transportConfigurers.Processor(voucher.Type()) + if has { + transportConfigurer := processor.(datatransfer.TransportConfigurer) + transportConfigurer(chid, voucher, m.transport) + } + m.dataTransferNetwork.Protect(requestTo, chid.String()) + monitoredChan := m.channelMonitor.AddPushChannel(chid) + if err := m.dataTransferNetwork.SendMessage(ctx, requestTo, req); err != nil { + err = fmt.Errorf("Unable to send request: %w", err) + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + _ = m.channels.Error(chid, err) + + // If push channel monitoring is enabled, shutdown the monitor as it + // wasn't possible to start the data transfer + if monitoredChan != nil { + monitoredChan.Shutdown() + } + + return chid, err + } + + return chid, nil +} + +// OpenPullDataChannel opens a data transfer that will request data from the sending peer and +// transfer parts of the piece that match the selector +func (m *manager) OpenPullDataChannel(ctx context.Context, requestTo peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) (datatransfer.ChannelID, error) { + log.Infof("open pull channel to %s with base cid %s", requestTo, baseCid) + + req, err := m.newRequest(ctx, selector, true, voucher, baseCid, requestTo) + if err != nil { + return datatransfer.ChannelID{}, err + } + // initiator = us, sender = them, receiver = us + chid, err := m.channels.CreateNew(m.peerID, req.TransferID(), baseCid, selector, voucher, + m.peerID, requestTo, m.peerID) + if err != nil { + return chid, err + } + ctx, span := m.spansIndex.SpanForChannel(ctx, chid) + processor, has := m.transportConfigurers.Processor(voucher.Type()) + if has { + transportConfigurer := processor.(datatransfer.TransportConfigurer) + transportConfigurer(chid, voucher, m.transport) + } + m.dataTransferNetwork.Protect(requestTo, chid.String()) + monitoredChan := m.channelMonitor.AddPullChannel(chid) + if err := m.transport.OpenChannel(ctx, requestTo, chid, cidlink.Link{Cid: baseCid}, selector, nil, req); err != nil { + err = fmt.Errorf("Unable to send request: %w", err) + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + _ = m.channels.Error(chid, err) + + // If pull channel monitoring is enabled, shutdown the monitor as it + // wasn't possible to start the data transfer + if monitoredChan != nil { + monitoredChan.Shutdown() + } + return chid, err + } + return chid, nil +} + +// SendVoucher sends an intermediate voucher as needed when the receiver sends a request for revalidation +func (m *manager) SendVoucher(ctx context.Context, channelID datatransfer.ChannelID, voucher datatransfer.Voucher) error { + chst, err := m.channels.GetByID(ctx, channelID) + if err != nil { + return err + } + ctx, _ = m.spansIndex.SpanForChannel(ctx, channelID) + ctx, span := otel.Tracer("data-transfer").Start(ctx, "sendVoucher", trace.WithAttributes( + attribute.String("channelID", channelID.String()), + attribute.String("voucherType", string(voucher.Type())), + )) + defer span.End() + if 
channelID.Initiator != m.peerID { + err := errors.New("cannot send voucher for request we did not initiate") + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + return err + } + updateRequest, err := message.VoucherRequest(channelID.ID, voucher.Type(), voucher) + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + return err + } + if err := m.dataTransferNetwork.SendMessage(ctx, chst.OtherPeer(), updateRequest); err != nil { + err = fmt.Errorf("Unable to send request: %w", err) + _ = m.OnRequestDisconnected(channelID, err) + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + return err + } + return m.channels.NewVoucher(channelID, voucher) +} + +// close an open channel (effectively a cancel) +func (m *manager) CloseDataTransferChannel(ctx context.Context, chid datatransfer.ChannelID) error { + log.Infof("close channel %s", chid) + + chst, err := m.channels.GetByID(ctx, chid) + if err != nil { + return err + } + ctx, _ = m.spansIndex.SpanForChannel(ctx, chid) + ctx, span := otel.Tracer("data-transfer").Start(ctx, "closeChannel", trace.WithAttributes( + attribute.String("channelID", chid.String()), + )) + defer span.End() + // Close the channel on the local transport + err = m.transport.CloseChannel(ctx, chid) + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + log.Warnf("unable to close channel %s: %s", chid, err) + } + + // Send a cancel message to the remote peer async + go func() { + sctx, cancel := context.WithTimeout(context.Background(), cancelSendTimeout) + defer cancel() + log.Infof("%s: sending cancel channel to %s for channel %s", m.peerID, chst.OtherPeer(), chid) + err = m.dataTransferNetwork.SendMessage(sctx, chst.OtherPeer(), m.cancelMessage(chid)) + if err != nil { + err = fmt.Errorf("unable to send cancel message for channel %s to peer %s: %w", + chid, m.peerID, err) + _ = m.OnRequestDisconnected(chid, err) + log.Warn(err) + } + }() + + // Fire a cancel event + fsmerr := m.channels.Cancel(chid) + if fsmerr != nil { + return xerrors.Errorf("unable to send cancel to channel FSM: %w", fsmerr) + } + + return nil +} + +// ConnectTo opens a connection to a peer on the data-transfer protocol, +// retrying if necessary +func (m *manager) ConnectTo(ctx context.Context, p peer.ID) error { + return m.dataTransferNetwork.ConnectWithRetry(ctx, p) +} + +// close an open channel and fire an error event +func (m *manager) CloseDataTransferChannelWithError(ctx context.Context, chid datatransfer.ChannelID, cherr error) error { + log.Infof("close channel %s with error %s", chid, cherr) + + chst, err := m.channels.GetByID(ctx, chid) + if err != nil { + return err + } + ctx, _ = m.spansIndex.SpanForChannel(ctx, chid) + ctx, span := otel.Tracer("data-transfer").Start(ctx, "closeChannel", trace.WithAttributes( + attribute.String("channelID", chid.String()), + )) + defer span.End() + + // Cancel the channel on the local transport + err = m.transport.CloseChannel(ctx, chid) + if err != nil { + log.Warnf("unable to close channel %s: %s", chid, err) + } + + // Try to send a cancel message to the remote peer. It's quite likely + // we aren't able to send the message to the peer because the channel + // is already in an error state, which is probably because of connection + // issues, so if we cant send the message just log a warning. 
+ log.Infof("%s: sending cancel channel to %s for channel %s", m.peerID, chst.OtherPeer(), chid) + err = m.dataTransferNetwork.SendMessage(ctx, chst.OtherPeer(), m.cancelMessage(chid)) + if err != nil { + // Just log a warning here because it's important that we fire the + // error event with the original error so that it doesn't get masked + // by subsequent errors. + log.Warnf("unable to send cancel message for channel %s to peer %s: %w", + chid, m.peerID, err) + } + + // Fire an error event + err = m.channels.Error(chid, cherr) + if err != nil { + return xerrors.Errorf("unable to send error %s to channel FSM: %w", cherr, err) + } + + return nil +} + +// pause a running data transfer channel +func (m *manager) PauseDataTransferChannel(ctx context.Context, chid datatransfer.ChannelID) error { + log.Infof("pause channel %s", chid) + + pausable, ok := m.transport.(datatransfer.PauseableTransport) + if !ok { + return datatransfer.ErrUnsupported + } + + ctx, _ = m.spansIndex.SpanForChannel(ctx, chid) + + err := pausable.PauseChannel(ctx, chid) + if err != nil { + log.Warnf("Error attempting to pause at transport level: %s", err.Error()) + } + + if err := m.dataTransferNetwork.SendMessage(ctx, chid.OtherParty(m.peerID), m.pauseMessage(chid)); err != nil { + err = fmt.Errorf("Unable to send pause message: %w", err) + _ = m.OnRequestDisconnected(chid, err) + return err + } + + return m.pause(chid) +} + +// resume a running data transfer channel +func (m *manager) ResumeDataTransferChannel(ctx context.Context, chid datatransfer.ChannelID) error { + log.Infof("resume channel %s", chid) + + pausable, ok := m.transport.(datatransfer.PauseableTransport) + if !ok { + return datatransfer.ErrUnsupported + } + + ctx, _ = m.spansIndex.SpanForChannel(ctx, chid) + + err := pausable.ResumeChannel(ctx, m.resumeMessage(chid), chid) + if err != nil { + log.Warnf("Error attempting to resume at transport level: %s", err.Error()) + } + + return m.resume(chid) +} + +// get channel state +func (m *manager) ChannelState(ctx context.Context, chid datatransfer.ChannelID) (datatransfer.ChannelState, error) { + return m.channels.GetByID(ctx, chid) +} + +// get status of a transfer +func (m *manager) TransferChannelStatus(ctx context.Context, chid datatransfer.ChannelID) datatransfer.Status { + chst, err := m.channels.GetByID(ctx, chid) + if err != nil { + return datatransfer.ChannelNotFoundError + } + return chst.Status() +} + +// get notified when certain types of events happen +func (m *manager) SubscribeToEvents(subscriber datatransfer.Subscriber) datatransfer.Unsubscribe { + return datatransfer.Unsubscribe(m.pubSub.Subscribe(subscriber)) +} + +// get all in progress transfers +func (m *manager) InProgressChannels(ctx context.Context) (map[datatransfer.ChannelID]datatransfer.ChannelState, error) { + return m.channels.InProgress() +} + +// RegisterRevalidator registers a revalidator for the given voucher type +// Note: this is the voucher type used to revalidate. It can share a name +// with the initial validator type and CAN be the same type, or a different type. +// The revalidator can simply be the sampe as the original request validator, +// or a different validator that satisfies the revalidator interface. 
+func (m *manager) RegisterRevalidator(voucherType datatransfer.Voucher, revalidator datatransfer.Revalidator) error { + err := m.revalidators.Register(voucherType, revalidator) + if err != nil { + return xerrors.Errorf("error registering revalidator type: %w", err) + } + return nil +} + +// RegisterVoucherResultType allows deserialization of a voucher result, +// so that a listener can read the metadata +func (m *manager) RegisterVoucherResultType(resultType datatransfer.VoucherResult) error { + err := m.resultTypes.Register(resultType, nil) + if err != nil { + return xerrors.Errorf("error registering voucher type: %w", err) + } + return nil +} + +// RegisterTransportConfigurer registers the given transport configurer to be run on requests with the given voucher +// type +func (m *manager) RegisterTransportConfigurer(voucherType datatransfer.Voucher, configurer datatransfer.TransportConfigurer) error { + err := m.transportConfigurers.Register(voucherType, configurer) + if err != nil { + return xerrors.Errorf("error registering transport configurer: %w", err) + } + return nil +} + +// RestartDataTransferChannel restarts data transfer on the channel with the given channelId +func (m *manager) RestartDataTransferChannel(ctx context.Context, chid datatransfer.ChannelID) error { + log.Infof("restart channel %s", chid) + + channel, err := m.channels.GetByID(ctx, chid) + if err != nil { + return xerrors.Errorf("failed to fetch channel: %w", err) + } + + // if channel has already been completed, there is nothing to do. + // TODO We could be in a state where the channel has completed but the corresponding event hasnt fired in the client/provider. + if channels.IsChannelTerminated(channel.Status()) { + return nil + } + + // if channel is is cleanup state, finish it + if channels.IsChannelCleaningUp(channel.Status()) { + return m.channels.CompleteCleanupOnRestart(channel.ChannelID()) + } + + ctx, _ = m.spansIndex.SpanForChannel(ctx, chid) + ctx, span := otel.Tracer("data-transfer").Start(ctx, "restartChannel", trace.WithAttributes( + attribute.String("channelID", chid.String()), + )) + defer span.End() + // initiate restart + chType := m.channelDataTransferType(channel) + switch chType { + case ManagerPeerReceivePush: + return m.restartManagerPeerReceivePush(ctx, channel) + case ManagerPeerReceivePull: + return m.restartManagerPeerReceivePull(ctx, channel) + case ManagerPeerCreatePull: + return m.openPullRestartChannel(ctx, channel) + case ManagerPeerCreatePush: + return m.openPushRestartChannel(ctx, channel) + } + + return nil +} + +func (m *manager) channelDataTransferType(channel datatransfer.ChannelState) ChannelDataTransferType { + initiator := channel.ChannelID().Initiator + if channel.IsPull() { + // we created a pull channel + if initiator == m.peerID { + return ManagerPeerCreatePull + } + + // we received a pull channel + return ManagerPeerReceivePull + } + + // we created a push channel + if initiator == m.peerID { + return ManagerPeerCreatePush + } + + // we received a push channel + return ManagerPeerReceivePush +} + +func (m *manager) PeerID() peer.ID { + return m.peerID +} diff --git a/datatransfer/impl/receiver.go b/datatransfer/impl/receiver.go new file mode 100644 index 000000000..02f0402b9 --- /dev/null +++ b/datatransfer/impl/receiver.go @@ -0,0 +1,192 @@ +package impl + +import ( + "context" + + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/libp2p/go-libp2p/core/peer" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + 
"go.opentelemetry.io/otel/trace" + + "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/datatransfer/channels" +) + +type receiver struct { + manager *manager +} + +// ReceiveRequest takes an incoming data transfer request, validates the voucher and +// processes the message. +func (r *receiver) ReceiveRequest( + ctx context.Context, + initiator peer.ID, + incoming datatransfer.Request) { + err := r.receiveRequest(ctx, initiator, incoming) + if err != nil { + log.Warnf("error processing request from %s: %s", initiator, err) + } +} + +func (r *receiver) receiveRequest(ctx context.Context, initiator peer.ID, incoming datatransfer.Request) error { + chid := datatransfer.ChannelID{Initiator: initiator, Responder: r.manager.peerID, ID: incoming.TransferID()} + ctx, _ = r.manager.spansIndex.SpanForChannel(ctx, chid) + ctx, span := otel.Tracer("data-transfer").Start(ctx, "receiveRequest", trace.WithAttributes( + attribute.String("channelID", chid.String()), + attribute.String("baseCid", incoming.BaseCid().String()), + attribute.Bool("isNew", incoming.IsNew()), + attribute.Bool("isRestart", incoming.IsRestart()), + attribute.Bool("isUpdate", incoming.IsUpdate()), + attribute.Bool("isCancel", incoming.IsCancel()), + attribute.Bool("isPaused", incoming.IsPaused()), + )) + defer span.End() + response, receiveErr := r.manager.OnRequestReceived(chid, incoming) + + if receiveErr == datatransfer.ErrResume { + chst, err := r.manager.channels.GetByID(ctx, chid) + if err != nil { + return err + } + if resumeTransportStatesResponder.Contains(chst.Status()) { + return r.manager.transport.(datatransfer.PauseableTransport).ResumeChannel(ctx, response, chid) + } + receiveErr = nil + } + + if response != nil { + if (response.IsNew() || response.IsRestart()) && response.Accepted() && !incoming.IsPull() { + var channel datatransfer.ChannelState + if response.IsRestart() { + var err error + channel, err = r.manager.channels.GetByID(ctx, chid) + if err != nil { + return err + } + } + + stor, _ := incoming.Selector() + if err := r.manager.transport.OpenChannel(ctx, initiator, chid, cidlink.Link{Cid: incoming.BaseCid()}, stor, channel, response); err != nil { + return err + } + } else { + if err := r.manager.dataTransferNetwork.SendMessage(ctx, initiator, response); err != nil { + return err + } + } + } + + if receiveErr == datatransfer.ErrPause { + return r.manager.transport.(datatransfer.PauseableTransport).PauseChannel(ctx, chid) + } + + if receiveErr != nil { + _ = r.manager.transport.CloseChannel(ctx, chid) + return receiveErr + } + + return nil +} + +// ReceiveResponse handles responses to our Push or Pull data transfer request. +// It schedules a transfer only if our Pull Request is accepted. 
+func (r *receiver) ReceiveResponse( + ctx context.Context, + sender peer.ID, + incoming datatransfer.Response) { + err := r.receiveResponse(ctx, sender, incoming) + if err != nil { + log.Error(err) + } +} +func (r *receiver) receiveResponse( + ctx context.Context, + sender peer.ID, + incoming datatransfer.Response) error { + chid := datatransfer.ChannelID{Initiator: r.manager.peerID, Responder: sender, ID: incoming.TransferID()} + ctx, _ = r.manager.spansIndex.SpanForChannel(ctx, chid) + ctx, span := otel.Tracer("data-transfer").Start(ctx, "receiveResponse", trace.WithAttributes( + attribute.String("channelID", chid.String()), + attribute.Bool("accepted", incoming.Accepted()), + attribute.Bool("isComplete", incoming.IsComplete()), + attribute.Bool("isNew", incoming.IsNew()), + attribute.Bool("isRestart", incoming.IsRestart()), + attribute.Bool("isUpdate", incoming.IsUpdate()), + attribute.Bool("isCancel", incoming.IsCancel()), + attribute.Bool("isPaused", incoming.IsPaused()), + )) + defer span.End() + err := r.manager.OnResponseReceived(chid, incoming) + if err == datatransfer.ErrPause { + return r.manager.transport.(datatransfer.PauseableTransport).PauseChannel(ctx, chid) + } + if err != nil { + log.Warnf("closing channel %s after getting error processing response from %s: %s", + chid, sender, err) + + _ = r.manager.transport.CloseChannel(ctx, chid) + return err + } + return nil +} + +func (r *receiver) ReceiveError(err error) { + log.Errorf("received error message on data transfer: %s", err.Error()) +} + +func (r *receiver) ReceiveRestartExistingChannelRequest(ctx context.Context, + sender peer.ID, + incoming datatransfer.Request) { + + ch, err := incoming.RestartChannelId() + if err != nil { + log.Errorf("cannot restart channel: failed to fetch channel Id: %w", err) + return + } + + ctx, _ = r.manager.spansIndex.SpanForChannel(ctx, ch) + ctx, span := otel.Tracer("data-transfer").Start(ctx, "receiveRequest", trace.WithAttributes( + attribute.String("channelID", ch.String()), + )) + defer span.End() + log.Infof("channel %s: received restart existing channel request from %s", ch, sender) + + // validate channel exists -> in non-terminal state and that the sender matches + channel, err := r.manager.channels.GetByID(ctx, ch) + if err != nil || channel == nil { + // nothing to do here, we wont handle the request + return + } + + // initiator should be me + if channel.ChannelID().Initiator != r.manager.peerID { + log.Errorf("cannot restart channel %s: channel initiator is not the manager peer", ch) + return + } + + // other peer should be the counter party on the channel + if channel.OtherPeer() != sender { + log.Errorf("cannot restart channel %s: channel counterparty is not the sender peer", ch) + return + } + + // channel should NOT be terminated + if channels.IsChannelTerminated(channel.Status()) { + log.Errorf("cannot restart channel %s: channel already terminated", ch) + return + } + + switch r.manager.channelDataTransferType(channel) { + case ManagerPeerCreatePush: + if err := r.manager.openPushRestartChannel(ctx, channel); err != nil { + log.Errorf("failed to open push restart channel %s: %s", ch, err) + } + case ManagerPeerCreatePull: + if err := r.manager.openPullRestartChannel(ctx, channel); err != nil { + log.Errorf("failed to open pull restart channel %s: %s", ch, err) + } + default: + log.Error("peer is not the creator of the channel") + } +} diff --git a/datatransfer/impl/restart.go b/datatransfer/impl/restart.go new file mode 100644 index 000000000..29aaf0404 --- /dev/null +++ 
b/datatransfer/impl/restart.go @@ -0,0 +1,198 @@ +package impl + +import ( + "bytes" + "context" + + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/libp2p/go-libp2p/core/peer" + "golang.org/x/xerrors" + + "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/datatransfer/channels" + "github.com/filecoin-project/boost/datatransfer/encoding" + "github.com/filecoin-project/boost/datatransfer/message" +) + +// ChannelDataTransferType identifies the type of a data transfer channel for the purposes of a restart +type ChannelDataTransferType int + +const ( + // ManagerPeerCreatePull is the type of a channel wherein the manager peer created a Pull Data Transfer + ManagerPeerCreatePull ChannelDataTransferType = iota + + // ManagerPeerCreatePush is the type of a channel wherein the manager peer created a Push Data Transfer + ManagerPeerCreatePush + + // ManagerPeerReceivePull is the type of a channel wherein the manager peer received a Pull Data Transfer Request + ManagerPeerReceivePull + + // ManagerPeerReceivePush is the type of a channel wherein the manager peer received a Push Data Transfer Request + ManagerPeerReceivePush +) + +func (m *manager) restartManagerPeerReceivePush(ctx context.Context, channel datatransfer.ChannelState) error { + if err := m.validateRestartVoucher(channel, false); err != nil { + return xerrors.Errorf("failed to restart channel, validation error: %w", err) + } + + // send a libp2p message to the other peer asking to send a "restart push request" + req := message.RestartExistingChannelRequest(channel.ChannelID()) + + if err := m.dataTransferNetwork.SendMessage(ctx, channel.OtherPeer(), req); err != nil { + return xerrors.Errorf("unable to send restart request: %w", err) + } + + return nil +} + +func (m *manager) restartManagerPeerReceivePull(ctx context.Context, channel datatransfer.ChannelState) error { + if err := m.validateRestartVoucher(channel, true); err != nil { + return xerrors.Errorf("failed to restart channel, validation error: %w", err) + } + + req := message.RestartExistingChannelRequest(channel.ChannelID()) + + // send a libp2p message to the other peer asking to send a "restart pull request" + if err := m.dataTransferNetwork.SendMessage(ctx, channel.OtherPeer(), req); err != nil { + return xerrors.Errorf("unable to send restart request: %w", err) + } + + return nil +} + +func (m *manager) validateRestartVoucher(channel datatransfer.ChannelState, isPull bool) error { + // re-validate the original voucher received for safety + chid := channel.ChannelID() + + // recreate the request that would have led to this pull channel being created for validation + req, err := message.NewRequest(chid.ID, false, isPull, channel.Voucher().Type(), channel.Voucher(), + channel.BaseCID(), channel.Selector()) + if err != nil { + return err + } + + // revalidate the voucher by reconstructing the request that would have led to the creation of this channel + if _, _, err := m.validateVoucher(true, chid, channel.OtherPeer(), req, isPull, channel.BaseCID(), channel.Selector()); err != nil { + return err + } + + return nil +} + +func (m *manager) openPushRestartChannel(ctx context.Context, channel datatransfer.ChannelState) error { + selector := channel.Selector() + voucher := channel.Voucher() + baseCid := channel.BaseCID() + requestTo := channel.OtherPeer() + chid := channel.ChannelID() + + req, err := message.NewRequest(chid.ID, true, false, voucher.Type(), voucher, baseCid, selector) + if err != nil { + return err + } + + 
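+ // The restart handshake re-sends the original request (voucher, base CID and
+ // selector) with the restart flag set, so that the responder can re-validate
+ // it against its stored channel state before the transfer resumes.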
processor, has := m.transportConfigurers.Processor(voucher.Type()) + if has { + transportConfigurer := processor.(datatransfer.TransportConfigurer) + transportConfigurer(chid, voucher, m.transport) + } + m.dataTransferNetwork.Protect(requestTo, chid.String()) + + // Monitor the state of the connection for the channel + monitoredChan := m.channelMonitor.AddPushChannel(chid) + log.Infof("sending push restart channel to %s for channel %s", requestTo, chid) + if err := m.dataTransferNetwork.SendMessage(ctx, requestTo, req); err != nil { + // If push channel monitoring is enabled, shutdown the monitor as it + // wasn't possible to start the data transfer + if monitoredChan != nil { + monitoredChan.Shutdown() + } + + return xerrors.Errorf("Unable to send restart request: %w", err) + } + + return nil +} + +func (m *manager) openPullRestartChannel(ctx context.Context, channel datatransfer.ChannelState) error { + selector := channel.Selector() + voucher := channel.Voucher() + baseCid := channel.BaseCID() + requestTo := channel.OtherPeer() + chid := channel.ChannelID() + + req, err := message.NewRequest(chid.ID, true, true, voucher.Type(), voucher, baseCid, selector) + if err != nil { + return err + } + + processor, has := m.transportConfigurers.Processor(voucher.Type()) + if has { + transportConfigurer := processor.(datatransfer.TransportConfigurer) + transportConfigurer(chid, voucher, m.transport) + } + m.dataTransferNetwork.Protect(requestTo, chid.String()) + + // Monitor the state of the connection for the channel + monitoredChan := m.channelMonitor.AddPullChannel(chid) + log.Infof("sending open channel to %s to restart channel %s", requestTo, chid) + if err := m.transport.OpenChannel(ctx, requestTo, chid, cidlink.Link{Cid: baseCid}, selector, channel, req); err != nil { + // If pull channel monitoring is enabled, shutdown the monitor as it + // wasn't possible to start the data transfer + if monitoredChan != nil { + monitoredChan.Shutdown() + } + + return xerrors.Errorf("Unable to send open channel restart request: %w", err) + } + + return nil +} + +func (m *manager) validateRestartRequest(ctx context.Context, otherPeer peer.ID, chid datatransfer.ChannelID, req datatransfer.Request) error { + // channel should exist + channel, err := m.channels.GetByID(ctx, chid) + if err != nil { + return err + } + + // channel is not terminated + if channels.IsChannelTerminated(channel.Status()) { + return xerrors.New("channel is already terminated") + } + + // channel initator should be the sender peer + if channel.ChannelID().Initiator != otherPeer { + return xerrors.New("other peer is not the initiator of the channel") + } + + // channel and request baseCid should match + if req.BaseCid() != channel.BaseCID() { + return xerrors.New("base cid does not match") + } + + // vouchers should match + reqVoucher, err := m.decodeVoucher(req, m.validatedTypes) + if err != nil { + return xerrors.Errorf("failed to decode request voucher: %w", err) + } + if reqVoucher.Type() != channel.Voucher().Type() { + return xerrors.New("channel and request voucher types do not match") + } + + reqBz, err := encoding.Encode(reqVoucher) + if err != nil { + return xerrors.New("failed to encode request voucher") + } + channelBz, err := encoding.Encode(channel.Voucher()) + if err != nil { + return xerrors.New("failed to encode channel voucher") + } + + if !bytes.Equal(reqBz, channelBz) { + return xerrors.New("channel and request vouchers do not match") + } + + return nil +} diff --git a/datatransfer/impl/timecounter.go 
b/datatransfer/impl/timecounter.go new file mode 100644 index 000000000..c13904f07 --- /dev/null +++ b/datatransfer/impl/timecounter.go @@ -0,0 +1,21 @@ +package impl + +import ( + "sync/atomic" + "time" +) + +// timeCounter is used to generate a monotonically increasing sequence. +// It starts at the current time, then increments on each call to next. +type timeCounter struct { + counter uint64 +} + +func newTimeCounter() *timeCounter { + return &timeCounter{counter: uint64(time.Now().UnixNano())} +} + +func (tc *timeCounter) next() uint64 { + counter := atomic.AddUint64(&tc.counter, 1) + return counter +} diff --git a/datatransfer/impl/utils.go b/datatransfer/impl/utils.go new file mode 100644 index 000000000..37842a43c --- /dev/null +++ b/datatransfer/impl/utils.go @@ -0,0 +1,140 @@ +package impl + +import ( + "context" + + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + "github.com/libp2p/go-libp2p/core/peer" + "golang.org/x/xerrors" + + "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/datatransfer/message" + "github.com/filecoin-project/boost/datatransfer/registry" +) + +type statusList []datatransfer.Status + +func (sl statusList) Contains(s datatransfer.Status) bool { + for _, ts := range sl { + if ts == s { + return true + } + } + return false +} + +var resumeTransportStatesResponder = statusList{ + datatransfer.Requested, + datatransfer.Ongoing, + datatransfer.InitiatorPaused, +} + +// newRequest encapsulates message creation +func (m *manager) newRequest(ctx context.Context, selector ipld.Node, isPull bool, voucher datatransfer.Voucher, baseCid cid.Cid, to peer.ID) (datatransfer.Request, error) { + // Generate a new transfer ID for the request + tid := datatransfer.TransferID(m.transferIDGen.next()) + return message.NewRequest(tid, false, isPull, voucher.Type(), voucher, baseCid, selector) +} + +func (m *manager) response(isRestart bool, isNew bool, err error, tid datatransfer.TransferID, voucherResult datatransfer.VoucherResult) (datatransfer.Response, error) { + isAccepted := err == nil || err == datatransfer.ErrPause || err == datatransfer.ErrResume + isPaused := err == datatransfer.ErrPause + resultType := datatransfer.EmptyTypeIdentifier + if voucherResult != nil { + resultType = voucherResult.Type() + } + if isRestart { + return message.RestartResponse(tid, isAccepted, isPaused, resultType, voucherResult) + } + + if isNew { + return message.NewResponse(tid, isAccepted, isPaused, resultType, voucherResult) + } + return message.VoucherResultResponse(tid, isAccepted, isPaused, resultType, voucherResult) +} + +func (m *manager) completeResponse(err error, tid datatransfer.TransferID, voucherResult datatransfer.VoucherResult) (datatransfer.Response, error) { + isAccepted := err == nil || err == datatransfer.ErrPause || err == datatransfer.ErrResume + isPaused := err == datatransfer.ErrPause + resultType := datatransfer.EmptyTypeIdentifier + if voucherResult != nil { + resultType = voucherResult.Type() + } + return message.CompleteResponse(tid, isAccepted, isPaused, resultType, voucherResult) +} + +func (m *manager) resume(chid datatransfer.ChannelID) error { + if chid.Initiator == m.peerID { + return m.channels.ResumeInitiator(chid) + } + return m.channels.ResumeResponder(chid) +} + +func (m *manager) pause(chid datatransfer.ChannelID) error { + if chid.Initiator == m.peerID { + return m.channels.PauseInitiator(chid) + } + return m.channels.PauseResponder(chid) +} + +func (m *manager) resumeOther(chid datatransfer.ChannelID) 
error { + if chid.Responder == m.peerID { + return m.channels.ResumeInitiator(chid) + } + return m.channels.ResumeResponder(chid) +} + +func (m *manager) pauseOther(chid datatransfer.ChannelID) error { + if chid.Responder == m.peerID { + return m.channels.PauseInitiator(chid) + } + return m.channels.PauseResponder(chid) +} + +func (m *manager) resumeMessage(chid datatransfer.ChannelID) datatransfer.Message { + if chid.Initiator == m.peerID { + return message.UpdateRequest(chid.ID, false) + } + return message.UpdateResponse(chid.ID, false) +} + +func (m *manager) pauseMessage(chid datatransfer.ChannelID) datatransfer.Message { + if chid.Initiator == m.peerID { + return message.UpdateRequest(chid.ID, true) + } + return message.UpdateResponse(chid.ID, true) +} + +func (m *manager) cancelMessage(chid datatransfer.ChannelID) datatransfer.Message { + if chid.Initiator == m.peerID { + return message.CancelRequest(chid.ID) + } + return message.CancelResponse(chid.ID) +} + +func (m *manager) decodeVoucherResult(response datatransfer.Response) (datatransfer.VoucherResult, error) { + vtypStr := datatransfer.TypeIdentifier(response.VoucherResultType()) + decoder, has := m.resultTypes.Decoder(vtypStr) + if !has { + return nil, xerrors.Errorf("unknown voucher result type: %s", vtypStr) + } + encodable, err := response.VoucherResult(decoder) + if err != nil { + return nil, err + } + return encodable.(datatransfer.Registerable), nil +} + +func (m *manager) decodeVoucher(request datatransfer.Request, registry *registry.Registry) (datatransfer.Voucher, error) { + vtypStr := datatransfer.TypeIdentifier(request.VoucherType()) + decoder, has := registry.Decoder(vtypStr) + if !has { + return nil, xerrors.Errorf("unknown voucher type: %s", vtypStr) + } + encodable, err := request.Voucher(decoder) + if err != nil { + return nil, err + } + return encodable.(datatransfer.Registerable), nil +} diff --git a/datatransfer/manager.go b/datatransfer/manager.go new file mode 100644 index 000000000..37dc7afe4 --- /dev/null +++ b/datatransfer/manager.go @@ -0,0 +1,260 @@ +package datatransfer + +import ( + "context" + + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + "github.com/libp2p/go-libp2p/core/peer" +) + +// RequestValidator is an interface implemented by the client of the +// data transfer module to validate requests +type RequestValidator interface { + // ValidatePush validates a push request received from the peer that will send data + ValidatePush( + isRestart bool, + chid ChannelID, + sender peer.ID, + voucher Voucher, + baseCid cid.Cid, + selector ipld.Node) (VoucherResult, error) + // ValidatePull validates a pull request received from the peer that will receive data + ValidatePull( + isRestart bool, + chid ChannelID, + receiver peer.ID, + voucher Voucher, + baseCid cid.Cid, + selector ipld.Node) (VoucherResult, error) +} + +// Revalidator is a request validator revalidates in progress requests +// by requesting request additional vouchers, and resuming when it receives them +type Revalidator interface { + // Revalidate revalidates a request with a new voucher + Revalidate(channelID ChannelID, voucher Voucher) (VoucherResult, error) + // OnPullDataSent is called on the responder side when more bytes are sent + // for a given pull request. The first value indicates whether the request was + // recognized by this revalidator and should be considered 'handled'. If true, + // the remaining two values are interpreted. If 'false' the request is passed on + // to the next revalidators. 
+ // It should return a VoucherResult + ErrPause to
+ // request revalidation or nil to continue uninterrupted,
+ // other errors will terminate the request.
+ OnPullDataSent(chid ChannelID, additionalBytesSent uint64) (bool, VoucherResult, error)
+ // OnPushDataReceived is called on the responder side when more bytes are received
+ // for a given push request. The first value indicates whether the request was
+ // recognized by this revalidator and should be considered 'handled'. If true,
+ // the remaining two values are interpreted. If 'false' the request is passed on
+ // to the next revalidators. It should return a VoucherResult + ErrPause to
+ // request revalidation or nil to continue uninterrupted,
+ // other errors will terminate the request.
+ OnPushDataReceived(chid ChannelID, additionalBytesReceived uint64) (bool, VoucherResult, error)
+ // OnComplete is called to make a final request for revalidation -- often for the
+ // purpose of settlement. The first value indicates whether the request was
+ // recognized by this revalidator and should be considered 'handled'. If true,
+ // the remaining two values are interpreted. If 'false' the request is passed on
+ // to the next revalidators.
+ // If VoucherResult is non-nil, the request will enter a settlement phase awaiting
+ // a final update.
+ OnComplete(chid ChannelID) (bool, VoucherResult, error)
+}
+
+// TransportConfigurer provides a mechanism to supply transport-specific configuration for a given voucher type
+type TransportConfigurer func(chid ChannelID, voucher Voucher, transport Transport)
+
+// ReadyFunc is a function that gets called once when the data transfer module is ready
+type ReadyFunc func(error)
+
+// Manager is the core interface presented by all implementations
+// of the data transfer sub system
+type Manager interface {
+
+ // Start initializes data transfer processing
+ Start(ctx context.Context) error
+
+ // OnReady registers a listener for when the data transfer comes online
+ OnReady(ReadyFunc)
+
+ // Stop terminates all data transfers and ends processing
+ Stop(ctx context.Context) error
+
+ // RegisterVoucherType registers a validator for the given voucher type
+ // will error if voucher type does not implement voucher
+ // or if there is a voucher type registered with an identical identifier
+ RegisterVoucherType(voucherType Voucher, validator RequestValidator) error
+
+ // RegisterRevalidator registers a revalidator for the given voucher type
+ // Note: this is the voucher type used to revalidate. It can share a name
+ // with the initial validator type and CAN be the same type, or a different type.
+ // The revalidator can simply be the same as the original request validator,
+ // or a different validator that satisfies the revalidator interface.
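+ //
+ // A minimal sketch of a revalidator (the type name is illustrative only):
+ //
+ //	type myRevalidator struct{}
+ //
+ //	func (r *myRevalidator) Revalidate(chid ChannelID, voucher Voucher) (VoucherResult, error) {
+ //		return nil, nil // accept the new voucher and continue
+ //	}
+ //	func (r *myRevalidator) OnPullDataSent(chid ChannelID, additionalBytesSent uint64) (bool, VoucherResult, error) {
+ //		return false, nil, nil // not handled by this revalidator
+ //	}
+ //	func (r *myRevalidator) OnPushDataReceived(chid ChannelID, additionalBytesReceived uint64) (bool, VoucherResult, error) {
+ //		return false, nil, nil
+ //	}
+ //	func (r *myRevalidator) OnComplete(chid ChannelID) (bool, VoucherResult, error) {
+ //		return true, nil, nil // handled; no settlement voucher required
+ //	}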
+ RegisterRevalidator(voucherType Voucher, revalidator Revalidator) error + + // RegisterVoucherResultType allows deserialization of a voucher result, + // so that a listener can read the metadata + RegisterVoucherResultType(resultType VoucherResult) error + + // RegisterTransportConfigurer registers the given transport configurer to be run on requests with the given voucher + // type + RegisterTransportConfigurer(voucherType Voucher, configurer TransportConfigurer) error + + // open a data transfer that will send data to the recipient peer and + // transfer parts of the piece that match the selector + OpenPushDataChannel(ctx context.Context, to peer.ID, voucher Voucher, baseCid cid.Cid, selector ipld.Node) (ChannelID, error) + + // open a data transfer that will request data from the sending peer and + // transfer parts of the piece that match the selector + OpenPullDataChannel(ctx context.Context, to peer.ID, voucher Voucher, baseCid cid.Cid, selector ipld.Node) (ChannelID, error) + + // send an intermediate voucher as needed when the receiver sends a request for revalidation + SendVoucher(ctx context.Context, chid ChannelID, voucher Voucher) error + + // close an open channel (effectively a cancel) + CloseDataTransferChannel(ctx context.Context, chid ChannelID) error + + // pause a data transfer channel (only allowed if transport supports it) + PauseDataTransferChannel(ctx context.Context, chid ChannelID) error + + // resume a data transfer channel (only allowed if transport supports it) + ResumeDataTransferChannel(ctx context.Context, chid ChannelID) error + + // get status of a transfer + TransferChannelStatus(ctx context.Context, x ChannelID) Status + + // get channel state + ChannelState(ctx context.Context, chid ChannelID) (ChannelState, error) + + // get notified when certain types of events happen + SubscribeToEvents(subscriber Subscriber) Unsubscribe + + // get all in progress transfers + InProgressChannels(ctx context.Context) (map[ChannelID]ChannelState, error) + + // RestartDataTransferChannel restarts an existing data transfer channel + RestartDataTransferChannel(ctx context.Context, chid ChannelID) error +} + +/* +Transport defines the interface for a transport layer for data +transfer. Where the data transfer manager will coordinate setting up push and +pull requests, validation, etc, the transport layer is responsible for moving +data back and forth, and may be medium specific. For example, some transports +may have the ability to pause and resume requests, while others may not. +Some may support individual data events, while others may only support message +events. Some transport layers may opt to use the actual data transfer network +protocols directly while others may be able to encode messages in their own +data protocol. + +Transport is the minimum interface that must be satisfied to serve as a datatransfer +transport layer. 
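+For example, in a pull transfer it is the initiator that calls OpenChannel on its
+own transport to start receiving blocks, while in a push transfer the responder
+calls OpenChannel once it has validated the request.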
Transports must be able to open (open is always called by the receiving peer)
+and close channels, and set an event handler
+*/
+type Transport interface {
+ // OpenChannel initiates an outgoing request for the other peer to send data
+ // to us on this channel
+ // Note: from a data transfer semantic standpoint, it doesn't matter if the
+ // request is push or pull -- OpenChannel is called by the party that is
+ // intending to receive data
+ OpenChannel(
+ ctx context.Context,
+ dataSender peer.ID,
+ channelID ChannelID,
+ root ipld.Link,
+ stor ipld.Node,
+ channel ChannelState,
+ msg Message,
+ ) error
+
+ // CloseChannel closes the given channel
+ CloseChannel(ctx context.Context, chid ChannelID) error
+ // SetEventHandler sets the handler for events on channels
+ SetEventHandler(events EventsHandler) error
+ // CleanupChannel is called on the other side of a cancel - removes any associated
+ // data for the channel
+ CleanupChannel(chid ChannelID)
+ Shutdown(ctx context.Context) error
+}
+
+// EventsHandler handles semantic data transfer events that happen as a result of graphsync hooks
+type EventsHandler interface {
+ // OnChannelOpened is called when we send a request for data to the other
+ // peer on the given channel ID
+ // return values are:
+ // - error = ignore incoming data for this channel
+ OnChannelOpened(chid ChannelID) error
+ // OnResponseReceived is called when we receive a response to a request
+ // - nil = continue receiving data
+ // - error = cancel this request
+ OnResponseReceived(chid ChannelID, msg Response) error
+ // OnDataReceived is called when we receive data for the given channel ID
+ // return values are:
+ // - nil = proceed with receiving data
+ // - error = cancel this request
+ // - err == ErrPause - pause this request
+ OnDataReceived(chid ChannelID, link ipld.Link, size uint64, index int64, unique bool) error
+
+ // OnDataQueued is called when data is queued for sending for the given channel ID
+ // return values are:
+ // message = data transfer message along with data
+ // err = error
+ // - nil = proceed with sending data
+ // - error = cancel this request
+ // - err == ErrPause - pause this request
+ OnDataQueued(chid ChannelID, link ipld.Link, size uint64, index int64, unique bool) (Message, error)
+
+ // OnDataSent is called when we send data for the given channel ID
+ OnDataSent(chid ChannelID, link ipld.Link, size uint64, index int64, unique bool) error
+
+ // OnTransferQueued is called when a new data transfer request is queued in the transport layer.
+ OnTransferQueued(chid ChannelID)
+
+ // OnRequestReceived is called when we receive a new request to send data
+ // for the given channel ID
+ // return values are:
+ // message = data transfer message along with reply
+ // err = error
+ // - nil = proceed with sending data
+ // - error = cancel this request
+ // - err == ErrPause - pause this request (only for new requests)
+ // - err == ErrResume - resume this request (only for update requests)
+ OnRequestReceived(chid ChannelID, msg Request) (Response, error)
+ // OnChannelCompleted is called when we finish transferring data for the given channel ID
+ // Error returns are logged but otherwise have no effect
+ OnChannelCompleted(chid ChannelID, err error) error
+
+ // OnRequestCancelled is called when a request we opened (with the given channel Id) to
+ // receive data is cancelled by us.
+	// Error returns are logged but otherwise have no effect
+	OnRequestCancelled(chid ChannelID, err error) error
+
+	// OnRequestDisconnected is called when a network error occurs trying to send a request
+	OnRequestDisconnected(chid ChannelID, err error) error
+
+	// OnSendDataError is called when a network error occurs sending data
+	// at the transport layer
+	OnSendDataError(chid ChannelID, err error) error
+
+	// OnReceiveDataError is called when a network error occurs receiving data
+	// at the transport layer
+	OnReceiveDataError(chid ChannelID, err error) error
+
+	// OnContextAugment allows the transport to attach data transfer tracing information
+	// to its local context, in order to create a hierarchical trace
+	OnContextAugment(chid ChannelID) func(context.Context) context.Context
+}
+
+// PauseableTransport is a transport that can also pause and resume channels
+type PauseableTransport interface {
+	Transport
+	// PauseChannel pauses the given channel ID
+	PauseChannel(ctx context.Context,
+		chid ChannelID,
+	) error
+	// ResumeChannel resumes the given channel
+	ResumeChannel(ctx context.Context,
+		msg Message,
+		chid ChannelID,
+	) error
+}
diff --git a/datatransfer/message.go b/datatransfer/message.go
new file mode 100644
index 000000000..371ba86a0
--- /dev/null
+++ b/datatransfer/message.go
@@ -0,0 +1,56 @@
+package datatransfer
+
+import (
+	"io"
+
+	"github.com/filecoin-project/boost/datatransfer/encoding"
+	"github.com/ipfs/go-cid"
+	"github.com/ipld/go-ipld-prime"
+	"github.com/ipld/go-ipld-prime/datamodel"
+	"github.com/libp2p/go-libp2p/core/protocol"
+)
+
+var (
+	// ProtocolDataTransfer1_2 is the protocol identifier for the latest
+	// version of data-transfer (supports do-not-send-first-blocks extension)
+	ProtocolDataTransfer1_2 protocol.ID = "/fil/datatransfer/1.2.0"
+)
+
+// Message is a message for the data transfer protocol
+// (either request or response) that can serialize to CBOR
+type Message interface {
+	IsRequest() bool
+	IsRestart() bool
+	IsNew() bool
+	IsUpdate() bool
+	IsPaused() bool
+	IsCancel() bool
+	TransferID() TransferID
+	ToNet(w io.Writer) error
+	ToIPLD() (datamodel.Node, error)
+	MessageForProtocol(targetProtocol protocol.ID) (newMsg Message, err error)
+}
+
+// Request is a request message for the data transfer protocol
+type Request interface {
+	Message
+	IsPull() bool
+	IsVoucher() bool
+	VoucherType() TypeIdentifier
+	Voucher(decoder encoding.Decoder) (encoding.Encodable, error)
+	BaseCid() cid.Cid
+	Selector() (ipld.Node, error)
+	IsRestartExistingChannelRequest() bool
+	RestartChannelId() (ChannelID, error)
+}
+
+// Response is a response message for the data transfer protocol
+type Response interface {
+	Message
+	IsVoucherResult() bool
+	IsComplete() bool
+	Accepted() bool
+	VoucherResultType() TypeIdentifier
+	VoucherResult(decoder encoding.Decoder) (encoding.Encodable, error)
+	EmptyVoucherResult() bool
+}
diff --git a/datatransfer/message/message.go b/datatransfer/message/message.go
new file mode 100644
index 000000000..f98cb3a3f
--- /dev/null
+++ b/datatransfer/message/message.go
@@ -0,0 +1,19 @@
+package message
+
+import (
+	"github.com/filecoin-project/boost/datatransfer/message/message1_1prime"
+)
+
+var NewRequest = message1_1.NewRequest
+var RestartExistingChannelRequest = message1_1.RestartExistingChannelRequest
+var UpdateRequest = message1_1.UpdateRequest
+var VoucherRequest = message1_1.VoucherRequest
+var RestartResponse = message1_1.RestartResponse
+var NewResponse = message1_1.NewResponse
+var 
VoucherResultResponse = message1_1.VoucherResultResponse +var CancelResponse = message1_1.CancelResponse +var UpdateResponse = message1_1.UpdateResponse +var FromNet = message1_1.FromNet +var FromIPLD = message1_1.FromIPLD +var CompleteResponse = message1_1.CompleteResponse +var CancelRequest = message1_1.CancelRequest diff --git a/datatransfer/message/message1_1/message.go b/datatransfer/message/message1_1/message.go new file mode 100644 index 000000000..c79de18a1 --- /dev/null +++ b/datatransfer/message/message1_1/message.go @@ -0,0 +1,195 @@ +package message1_1 + +import ( + "bytes" + "io" + + datatransfer2 "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/datatransfer/encoding" + "github.com/filecoin-project/boost/datatransfer/message/types" + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + "github.com/ipld/go-ipld-prime/datamodel" + cborgen "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" +) + +// NewRequest generates a new request for the data transfer protocol +func NewRequest(id datatransfer2.TransferID, isRestart bool, isPull bool, vtype datatransfer2.TypeIdentifier, voucher encoding.Encodable, baseCid cid.Cid, selector ipld.Node) (datatransfer2.Request, error) { + vbytes, err := encoding.Encode(voucher) + if err != nil { + return nil, xerrors.Errorf("Creating request: %w", err) + } + if baseCid == cid.Undef { + return nil, xerrors.Errorf("base CID must be defined") + } + selBytes, err := encoding.Encode(selector) + if err != nil { + return nil, xerrors.Errorf("Error encoding selector") + } + + var typ uint64 + if isRestart { + typ = uint64(types.RestartMessage) + } else { + typ = uint64(types.NewMessage) + } + + return &TransferRequest1_1{ + Type: typ, + Pull: isPull, + Vouch: &cborgen.Deferred{Raw: vbytes}, + Stor: &cborgen.Deferred{Raw: selBytes}, + BCid: &baseCid, + VTyp: vtype, + XferID: uint64(id), + }, nil +} + +// RestartExistingChannelRequest creates a request to ask the other side to restart an existing channel +func RestartExistingChannelRequest(channelId datatransfer2.ChannelID) datatransfer2.Request { + + return &TransferRequest1_1{Type: uint64(types.RestartExistingChannelRequestMessage), + RestartChannel: channelId} +} + +// CancelRequest request generates a request to cancel an in progress request +func CancelRequest(id datatransfer2.TransferID) datatransfer2.Request { + return &TransferRequest1_1{ + Type: uint64(types.CancelMessage), + XferID: uint64(id), + } +} + +// UpdateRequest generates a new request update +func UpdateRequest(id datatransfer2.TransferID, isPaused bool) datatransfer2.Request { + return &TransferRequest1_1{ + Type: uint64(types.UpdateMessage), + Paus: isPaused, + XferID: uint64(id), + } +} + +// VoucherRequest generates a new request for the data transfer protocol +func VoucherRequest(id datatransfer2.TransferID, vtype datatransfer2.TypeIdentifier, voucher encoding.Encodable) (datatransfer2.Request, error) { + vbytes, err := encoding.Encode(voucher) + if err != nil { + return nil, xerrors.Errorf("Creating request: %w", err) + } + return &TransferRequest1_1{ + Type: uint64(types.VoucherMessage), + Vouch: &cborgen.Deferred{Raw: vbytes}, + VTyp: vtype, + XferID: uint64(id), + }, nil +} + +// RestartResponse builds a new Data Transfer response +func RestartResponse(id datatransfer2.TransferID, accepted bool, isPaused bool, voucherResultType datatransfer2.TypeIdentifier, voucherResult encoding.Encodable) (datatransfer2.Response, error) { + 
vbytes, err := encoding.Encode(voucherResult) + if err != nil { + return nil, xerrors.Errorf("Creating request: %w", err) + } + return &TransferResponse1_1{ + Acpt: accepted, + Type: uint64(types.RestartMessage), + Paus: isPaused, + XferID: uint64(id), + VTyp: voucherResultType, + VRes: &cborgen.Deferred{Raw: vbytes}, + }, nil +} + +// NewResponse builds a new Data Transfer response +func NewResponse(id datatransfer2.TransferID, accepted bool, isPaused bool, voucherResultType datatransfer2.TypeIdentifier, voucherResult encoding.Encodable) (datatransfer2.Response, error) { + vbytes, err := encoding.Encode(voucherResult) + if err != nil { + return nil, xerrors.Errorf("Creating request: %w", err) + } + return &TransferResponse1_1{ + Acpt: accepted, + Type: uint64(types.NewMessage), + Paus: isPaused, + XferID: uint64(id), + VTyp: voucherResultType, + VRes: &cborgen.Deferred{Raw: vbytes}, + }, nil +} + +// VoucherResultResponse builds a new response for a voucher result +func VoucherResultResponse(id datatransfer2.TransferID, accepted bool, isPaused bool, voucherResultType datatransfer2.TypeIdentifier, voucherResult encoding.Encodable) (datatransfer2.Response, error) { + vbytes, err := encoding.Encode(voucherResult) + if err != nil { + return nil, xerrors.Errorf("Creating request: %w", err) + } + return &TransferResponse1_1{ + Acpt: accepted, + Type: uint64(types.VoucherResultMessage), + Paus: isPaused, + XferID: uint64(id), + VTyp: voucherResultType, + VRes: &cborgen.Deferred{Raw: vbytes}, + }, nil +} + +// UpdateResponse returns a new update response +func UpdateResponse(id datatransfer2.TransferID, isPaused bool) datatransfer2.Response { + return &TransferResponse1_1{ + Type: uint64(types.UpdateMessage), + Paus: isPaused, + XferID: uint64(id), + } +} + +// CancelResponse makes a new cancel response message +func CancelResponse(id datatransfer2.TransferID) datatransfer2.Response { + return &TransferResponse1_1{ + Type: uint64(types.CancelMessage), + XferID: uint64(id), + } +} + +// CompleteResponse returns a new complete response message +func CompleteResponse(id datatransfer2.TransferID, isAccepted bool, isPaused bool, voucherResultType datatransfer2.TypeIdentifier, voucherResult encoding.Encodable) (datatransfer2.Response, error) { + vbytes, err := encoding.Encode(voucherResult) + if err != nil { + return nil, xerrors.Errorf("Creating request: %w", err) + } + return &TransferResponse1_1{ + Type: uint64(types.CompleteMessage), + Acpt: isAccepted, + Paus: isPaused, + VTyp: voucherResultType, + VRes: &cborgen.Deferred{Raw: vbytes}, + XferID: uint64(id), + }, nil +} + +// FromNet can read a network stream to deserialize a GraphSyncMessage +func FromNet(r io.Reader) (datatransfer2.Message, error) { + tresp := TransferMessage1_1{} + err := tresp.UnmarshalCBOR(r) + if err != nil { + return nil, err + } + + if (tresp.IsRequest() && tresp.Request == nil) || (!tresp.IsRequest() && tresp.Response == nil) { + return nil, xerrors.Errorf("invalid/malformed message") + } + + if tresp.IsRequest() { + return tresp.Request, nil + } + return tresp.Response, nil +} + +// FromNet can read a network stream to deserialize a GraphSyncMessage +func FromIPLD(nd datamodel.Node) (datatransfer2.Message, error) { + buf := new(bytes.Buffer) + err := dagcbor.Encode(nd, buf) + if err != nil { + return nil, err + } + return FromNet(buf) +} diff --git a/datatransfer/message/message1_1/transfer_message.go b/datatransfer/message/message1_1/transfer_message.go new file mode 100644 index 000000000..42c7493af --- /dev/null +++ 
b/datatransfer/message/message1_1/transfer_message.go @@ -0,0 +1,58 @@ +package message1_1 + +import ( + "bytes" + "io" + + "github.com/filecoin-project/boost/datatransfer" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + "github.com/ipld/go-ipld-prime/datamodel" + basicnode "github.com/ipld/go-ipld-prime/node/basic" +) + +//go:generate cbor-gen-for --map-encoding TransferMessage1_1 + +// transferMessage1_1 is the transfer message for the 1.1 Data Transfer Protocol. +type TransferMessage1_1 struct { + IsRq bool + + Request *TransferRequest1_1 + Response *TransferResponse1_1 +} + +// ========= datatransfer.Message interface + +// IsRequest returns true if this message is a data request +func (tm *TransferMessage1_1) IsRequest() bool { + return tm.IsRq +} + +// TransferID returns the TransferID of this message +func (tm *TransferMessage1_1) TransferID() datatransfer.TransferID { + if tm.IsRequest() { + return tm.Request.TransferID() + } + return tm.Response.TransferID() +} + +// ToNet serializes a transfer message type. It is simply a wrapper for MarshalCBOR, to provide +// symmetry with FromNet +func (tm *TransferMessage1_1) ToIPLD() (datamodel.Node, error) { + buf := new(bytes.Buffer) + err := tm.ToNet(buf) + if err != nil { + return nil, err + } + nb := basicnode.Prototype.Any.NewBuilder() + err = dagcbor.Decode(nb, buf) + if err != nil { + return nil, err + } + return nb.Build(), nil +} + +// ToNet serializes a transfer message type. It is simply a wrapper for MarshalCBOR, to provide +// symmetry with FromNet +func (tm *TransferMessage1_1) ToNet(w io.Writer) error { + return tm.MarshalCBOR(w) +} diff --git a/datatransfer/message/message1_1/transfer_message_cbor_gen.go b/datatransfer/message/message1_1/transfer_message_cbor_gen.go new file mode 100644 index 000000000..dcf3a6d15 --- /dev/null +++ b/datatransfer/message/message1_1/transfer_message_cbor_gen.go @@ -0,0 +1,187 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package message1_1 + +import ( + "fmt" + "io" + "math" + "sort" + + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +func (t *TransferMessage1_1) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{163}); err != nil { + return err + } + + // t.IsRq (bool) (bool) + if len("IsRq") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"IsRq\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("IsRq"))); err != nil { + return err + } + if _, err := cw.WriteString(string("IsRq")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.IsRq); err != nil { + return err + } + + // t.Request (message1_1.TransferRequest1_1) (struct) + if len("Request") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Request\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Request"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Request")); err != nil { + return err + } + + if err := t.Request.MarshalCBOR(cw); err != nil { + return err + } + + // t.Response (message1_1.TransferResponse1_1) (struct) + if len("Response") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Response\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Response"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Response")); err != nil { + return err + } + + if err := t.Response.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *TransferMessage1_1) UnmarshalCBOR(r io.Reader) (err error) { + *t = TransferMessage1_1{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("TransferMessage1_1: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.IsRq (bool) (bool) + case "IsRq": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.IsRq = false + case 21: + t.IsRq = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.Request (message1_1.TransferRequest1_1) (struct) + case "Request": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Request = new(TransferRequest1_1) + if err := t.Request.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Request pointer: %w", err) + } + } + + } + // t.Response (message1_1.TransferResponse1_1) (struct) + case "Response": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Response = new(TransferResponse1_1) + if err := t.Response.UnmarshalCBOR(cr); err != nil { + return 
xerrors.Errorf("unmarshaling t.Response pointer: %w", err) + } + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} diff --git a/datatransfer/message/message1_1/transfer_request.go b/datatransfer/message/message1_1/transfer_request.go new file mode 100644 index 000000000..6ea55a775 --- /dev/null +++ b/datatransfer/message/message1_1/transfer_request.go @@ -0,0 +1,165 @@ +package message1_1 + +import ( + "bytes" + "io" + + datatransfer2 "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/datatransfer/encoding" + "github.com/filecoin-project/boost/datatransfer/message/types" + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + "github.com/ipld/go-ipld-prime/datamodel" + basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/libp2p/go-libp2p/core/protocol" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" +) + +//go:generate cbor-gen-for --map-encoding TransferRequest1_1 + +// TransferRequest1_1 is a struct for the 1.1 Data Transfer Protocol that fulfills the datatransfer.Request interface. +// its members are exported to be used by cbor-gen +type TransferRequest1_1 struct { + BCid *cid.Cid + Type uint64 + Paus bool + Part bool + Pull bool + Stor *cbg.Deferred + Vouch *cbg.Deferred + VTyp datatransfer2.TypeIdentifier + XferID uint64 + + RestartChannel datatransfer2.ChannelID +} + +func (trq *TransferRequest1_1) MessageForProtocol(targetProtocol protocol.ID) (datatransfer2.Message, error) { + switch targetProtocol { + case datatransfer2.ProtocolDataTransfer1_2: + return trq, nil + default: + return nil, xerrors.Errorf("protocol not supported") + } +} + +// IsRequest always returns true in this case because this is a transfer request +func (trq *TransferRequest1_1) IsRequest() bool { + return true +} + +func (trq *TransferRequest1_1) IsRestart() bool { + return trq.Type == uint64(types.RestartMessage) +} + +func (trq *TransferRequest1_1) IsRestartExistingChannelRequest() bool { + return trq.Type == uint64(types.RestartExistingChannelRequestMessage) +} + +func (trq *TransferRequest1_1) RestartChannelId() (datatransfer2.ChannelID, error) { + if !trq.IsRestartExistingChannelRequest() { + return datatransfer2.ChannelID{}, xerrors.New("not a restart request") + } + return trq.RestartChannel, nil +} + +func (trq *TransferRequest1_1) IsNew() bool { + return trq.Type == uint64(types.NewMessage) +} + +func (trq *TransferRequest1_1) IsUpdate() bool { + return trq.Type == uint64(types.UpdateMessage) +} + +func (trq *TransferRequest1_1) IsVoucher() bool { + return trq.Type == uint64(types.VoucherMessage) || trq.Type == uint64(types.NewMessage) +} + +func (trq *TransferRequest1_1) IsPaused() bool { + return trq.Paus +} + +func (trq *TransferRequest1_1) TransferID() datatransfer2.TransferID { + return datatransfer2.TransferID(trq.XferID) +} + +// ========= datatransfer.Request interface +// IsPull returns true if this is a data pull request +func (trq *TransferRequest1_1) IsPull() bool { + return trq.Pull +} + +// VoucherType returns the Voucher ID +func (trq *TransferRequest1_1) VoucherType() datatransfer2.TypeIdentifier { + return trq.VTyp +} + +// Voucher returns the Voucher bytes +func (trq *TransferRequest1_1) Voucher(decoder encoding.Decoder) (encoding.Encodable, error) { + if trq.Vouch == nil { + return nil, xerrors.New("No voucher present to read") + } + return 
decoder.DecodeFromCbor(trq.Vouch.Raw) +} + +func (trq *TransferRequest1_1) EmptyVoucher() bool { + return trq.VTyp == datatransfer2.EmptyTypeIdentifier +} + +// BaseCid returns the Base CID +func (trq *TransferRequest1_1) BaseCid() cid.Cid { + if trq.BCid == nil { + return cid.Undef + } + return *trq.BCid +} + +// Selector returns the message Selector bytes +func (trq *TransferRequest1_1) Selector() (ipld.Node, error) { + if trq.Stor == nil { + return nil, xerrors.New("No selector present to read") + } + builder := basicnode.Prototype.Any.NewBuilder() + reader := bytes.NewReader(trq.Stor.Raw) + err := dagcbor.Decode(builder, reader) + if err != nil { + return nil, xerrors.Errorf("Error decoding selector: %w", err) + } + return builder.Build(), nil +} + +// IsCancel returns true if this is a cancel request +func (trq *TransferRequest1_1) IsCancel() bool { + return trq.Type == uint64(types.CancelMessage) +} + +// IsPartial returns true if this is a partial request +func (trq *TransferRequest1_1) IsPartial() bool { + return trq.Part +} + +func (trq *TransferRequest1_1) ToIPLD() (datamodel.Node, error) { + buf := new(bytes.Buffer) + err := trq.ToNet(buf) + if err != nil { + return nil, err + } + nb := basicnode.Prototype.Any.NewBuilder() + err = dagcbor.Decode(nb, buf) + if err != nil { + return nil, err + } + return nb.Build(), nil +} + +// ToNet serializes a transfer request. It's a wrapper for MarshalCBOR to provide +// symmetry with FromNet +func (trq *TransferRequest1_1) ToNet(w io.Writer) error { + msg := TransferMessage1_1{ + IsRq: true, + Request: trq, + Response: nil, + } + return msg.MarshalCBOR(w) +} diff --git a/datatransfer/message/message1_1/transfer_request_cbor_gen.go b/datatransfer/message/message1_1/transfer_request_cbor_gen.go new file mode 100644 index 000000000..53e63742a --- /dev/null +++ b/datatransfer/message/message1_1/transfer_request_cbor_gen.go @@ -0,0 +1,405 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package message1_1 + +import ( + "fmt" + "io" + "math" + "sort" + + "github.com/filecoin-project/boost/datatransfer" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +func (t *TransferRequest1_1) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{170}); err != nil { + return err + } + + // t.BCid (cid.Cid) (struct) + if len("BCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"BCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("BCid"))); err != nil { + return err + } + if _, err := cw.WriteString(string("BCid")); err != nil { + return err + } + + if t.BCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.BCid); err != nil { + return xerrors.Errorf("failed to write cid field t.BCid: %w", err) + } + } + + // t.Part (bool) (bool) + if len("Part") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Part\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Part"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Part")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.Part); err != nil { + return err + } + + // t.Paus (bool) (bool) + if len("Paus") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Paus\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Paus"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Paus")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.Paus); err != nil { + return err + } + + // t.Pull (bool) (bool) + if len("Pull") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Pull\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Pull"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Pull")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.Pull); err != nil { + return err + } + + // t.Stor (typegen.Deferred) (struct) + if len("Stor") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Stor\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Stor"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Stor")); err != nil { + return err + } + + if err := t.Stor.MarshalCBOR(cw); err != nil { + return err + } + + // t.Type (uint64) (uint64) + if len("Type") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Type\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Type"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Type")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Type)); err != nil { + return err + } + + // t.VTyp (datatransfer.TypeIdentifier) (string) + if len("VTyp") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"VTyp\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("VTyp"))); err != nil { + return err + } + if _, err := cw.WriteString(string("VTyp")); err != nil { + return err + } + + if len(t.VTyp) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.VTyp was too long") + } + + if err := 
cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.VTyp))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.VTyp)); err != nil { + return err + } + + // t.Vouch (typegen.Deferred) (struct) + if len("Vouch") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Vouch\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Vouch"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Vouch")); err != nil { + return err + } + + if err := t.Vouch.MarshalCBOR(cw); err != nil { + return err + } + + // t.XferID (uint64) (uint64) + if len("XferID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"XferID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("XferID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("XferID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.XferID)); err != nil { + return err + } + + // t.RestartChannel (datatransfer.ChannelID) (struct) + if len("RestartChannel") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"RestartChannel\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("RestartChannel"))); err != nil { + return err + } + if _, err := cw.WriteString(string("RestartChannel")); err != nil { + return err + } + + if err := t.RestartChannel.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *TransferRequest1_1) UnmarshalCBOR(r io.Reader) (err error) { + *t = TransferRequest1_1{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("TransferRequest1_1: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.BCid (cid.Cid) (struct) + case "BCid": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.BCid: %w", err) + } + + t.BCid = &c + } + + } + // t.Part (bool) (bool) + case "Part": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.Part = false + case 21: + t.Part = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.Paus (bool) (bool) + case "Paus": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.Paus = false + case 21: + t.Paus = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.Pull (bool) (bool) + case "Pull": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.Pull = false + case 21: + t.Pull = true + default: + return 
fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.Stor (typegen.Deferred) (struct) + case "Stor": + + { + + t.Stor = new(cbg.Deferred) + + if err := t.Stor.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("failed to read deferred field: %w", err) + } + } + // t.Type (uint64) (uint64) + case "Type": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Type = uint64(extra) + + } + // t.VTyp (datatransfer.TypeIdentifier) (string) + case "VTyp": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.VTyp = datatransfer.TypeIdentifier(sval) + } + // t.Vouch (typegen.Deferred) (struct) + case "Vouch": + + { + + t.Vouch = new(cbg.Deferred) + + if err := t.Vouch.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("failed to read deferred field: %w", err) + } + } + // t.XferID (uint64) (uint64) + case "XferID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.XferID = uint64(extra) + + } + // t.RestartChannel (datatransfer.ChannelID) (struct) + case "RestartChannel": + + { + + if err := t.RestartChannel.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.RestartChannel: %w", err) + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} diff --git a/datatransfer/message/message1_1/transfer_response.go b/datatransfer/message/message1_1/transfer_response.go new file mode 100644 index 000000000..d62a082c8 --- /dev/null +++ b/datatransfer/message/message1_1/transfer_response.go @@ -0,0 +1,126 @@ +package message1_1 + +import ( + "bytes" + "io" + + datatransfer2 "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/datatransfer/encoding" + "github.com/filecoin-project/boost/datatransfer/message/types" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + "github.com/ipld/go-ipld-prime/datamodel" + basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/libp2p/go-libp2p/core/protocol" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" +) + +//go:generate cbor-gen-for --map-encoding TransferResponse1_1 + +// TransferResponse1_1 is a private struct that satisfies the datatransfer.Response interface +// It is the response message for the Data Transfer 1.1 and 1.2 Protocol. 
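+//
+// A minimal usage sketch (assuming "exampleResults" is a hypothetical voucher result
+// type registered with the encoding layer and res is a matching encoding.Encodable):
+//
+//	resp, err := NewResponse(datatransfer2.TransferID(1), true, false, "exampleResults", res)
+//	if err != nil {
+//		return err
+//	}
+//	var buf bytes.Buffer
+//	// ToNet wraps the response in a TransferMessage1_1 and CBOR-encodes it
+//	if err := resp.ToNet(&buf); err != nil {
+//		return err
+//	}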
+type TransferResponse1_1 struct { + Type uint64 + Acpt bool + Paus bool + XferID uint64 + VRes *cbg.Deferred + VTyp datatransfer2.TypeIdentifier +} + +func (trsp *TransferResponse1_1) TransferID() datatransfer2.TransferID { + return datatransfer2.TransferID(trsp.XferID) +} + +// IsRequest always returns false in this case because this is a transfer response +func (trsp *TransferResponse1_1) IsRequest() bool { + return false +} + +// IsNew returns true if this is the first response sent +func (trsp *TransferResponse1_1) IsNew() bool { + return trsp.Type == uint64(types.NewMessage) +} + +// IsUpdate returns true if this response is an update +func (trsp *TransferResponse1_1) IsUpdate() bool { + return trsp.Type == uint64(types.UpdateMessage) +} + +// IsPaused returns true if the responder is paused +func (trsp *TransferResponse1_1) IsPaused() bool { + return trsp.Paus +} + +// IsCancel returns true if the responder has cancelled this response +func (trsp *TransferResponse1_1) IsCancel() bool { + return trsp.Type == uint64(types.CancelMessage) +} + +// IsComplete returns true if the responder has completed this response +func (trsp *TransferResponse1_1) IsComplete() bool { + return trsp.Type == uint64(types.CompleteMessage) +} + +func (trsp *TransferResponse1_1) IsVoucherResult() bool { + return trsp.Type == uint64(types.VoucherResultMessage) || trsp.Type == uint64(types.NewMessage) || trsp.Type == uint64(types.CompleteMessage) || + trsp.Type == uint64(types.RestartMessage) +} + +// Accepted returns true if the request is accepted in the response +func (trsp *TransferResponse1_1) Accepted() bool { + return trsp.Acpt +} + +func (trsp *TransferResponse1_1) VoucherResultType() datatransfer2.TypeIdentifier { + return trsp.VTyp +} + +func (trsp *TransferResponse1_1) VoucherResult(decoder encoding.Decoder) (encoding.Encodable, error) { + if trsp.VRes == nil { + return nil, xerrors.New("No voucher present to read") + } + return decoder.DecodeFromCbor(trsp.VRes.Raw) +} + +func (trq *TransferResponse1_1) IsRestart() bool { + return trq.Type == uint64(types.RestartMessage) +} + +func (trsp *TransferResponse1_1) EmptyVoucherResult() bool { + return trsp.VTyp == datatransfer2.EmptyTypeIdentifier +} + +func (trsp *TransferResponse1_1) MessageForProtocol(targetProtocol protocol.ID) (datatransfer2.Message, error) { + switch targetProtocol { + case datatransfer2.ProtocolDataTransfer1_2: + return trsp, nil + default: + return nil, xerrors.Errorf("protocol %s not supported", targetProtocol) + } +} + +func (trsp *TransferResponse1_1) ToIPLD() (datamodel.Node, error) { + buf := new(bytes.Buffer) + err := trsp.ToNet(buf) + if err != nil { + return nil, err + } + nb := basicnode.Prototype.Any.NewBuilder() + err = dagcbor.Decode(nb, buf) + if err != nil { + return nil, err + } + return nb.Build(), nil +} + +// ToNet serializes a transfer response. It's a wrapper for MarshalCBOR to provide +// symmetry with FromNet +func (trsp *TransferResponse1_1) ToNet(w io.Writer) error { + msg := TransferMessage1_1{ + IsRq: false, + Request: nil, + Response: trsp, + } + return msg.MarshalCBOR(w) +} diff --git a/datatransfer/message/message1_1/transfer_response_cbor_gen.go b/datatransfer/message/message1_1/transfer_response_cbor_gen.go new file mode 100644 index 000000000..5bdcd004b --- /dev/null +++ b/datatransfer/message/message1_1/transfer_response_cbor_gen.go @@ -0,0 +1,265 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package message1_1 + +import ( + "fmt" + "io" + "sort" + + "github.com/filecoin-project/boost/datatransfer" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = sort.Sort + +func (t *TransferResponse1_1) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{166}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Type (uint64) (uint64) + if len("Type") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Type\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Type"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Type")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Type)); err != nil { + return err + } + + // t.Acpt (bool) (bool) + if len("Acpt") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Acpt\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Acpt"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Acpt")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.Acpt); err != nil { + return err + } + + // t.Paus (bool) (bool) + if len("Paus") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Paus\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Paus"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Paus")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.Paus); err != nil { + return err + } + + // t.XferID (uint64) (uint64) + if len("XferID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"XferID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("XferID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("XferID")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.XferID)); err != nil { + return err + } + + // t.VRes (typegen.Deferred) (struct) + if len("VRes") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"VRes\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("VRes"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("VRes")); err != nil { + return err + } + + if err := t.VRes.MarshalCBOR(w); err != nil { + return err + } + + // t.VTyp (datatransfer.TypeIdentifier) (string) + if len("VTyp") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"VTyp\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("VTyp"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("VTyp")); err != nil { + return err + } + + if len(t.VTyp) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.VTyp was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.VTyp))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.VTyp)); err != nil { + return err + } + return nil +} + +func (t *TransferResponse1_1) UnmarshalCBOR(r io.Reader) error { + *t = TransferResponse1_1{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, 
scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("TransferResponse1_1: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Type (uint64) (uint64) + case "Type": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Type = uint64(extra) + + } + // t.Acpt (bool) (bool) + case "Acpt": + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.Acpt = false + case 21: + t.Acpt = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.Paus (bool) (bool) + case "Paus": + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.Paus = false + case 21: + t.Paus = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.XferID (uint64) (uint64) + case "XferID": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.XferID = uint64(extra) + + } + // t.VRes (typegen.Deferred) (struct) + case "VRes": + + { + + t.VRes = new(cbg.Deferred) + + if err := t.VRes.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("failed to read deferred field: %w", err) + } + } + // t.VTyp (datatransfer.TypeIdentifier) (string) + case "VTyp": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.VTyp = datatransfer.TypeIdentifier(sval) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} diff --git a/datatransfer/message/message1_1prime/message.go b/datatransfer/message/message1_1prime/message.go new file mode 100644 index 000000000..d4d32897f --- /dev/null +++ b/datatransfer/message/message1_1prime/message.go @@ -0,0 +1,206 @@ +package message1_1 + +import ( + "io" + + datatransfer2 "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/datatransfer/encoding" + "github.com/filecoin-project/boost/datatransfer/message/types" + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + "github.com/ipld/go-ipld-prime/datamodel" + "github.com/ipld/go-ipld-prime/node/bindnode" + "github.com/ipld/go-ipld-prime/schema" + "golang.org/x/xerrors" +) + +// NewRequest generates a new request for the data transfer protocol +func NewRequest(id datatransfer2.TransferID, isRestart bool, isPull bool, vtype datatransfer2.TypeIdentifier, voucher encoding.Encodable, baseCid cid.Cid, selector ipld.Node) (datatransfer2.Request, error) { + vnode, err := encoding.EncodeToNode(voucher) + if err != nil { + return nil, xerrors.Errorf("Creating request: %w", err) + } + + if baseCid == cid.Undef { + return nil, xerrors.Errorf("base CID must be defined") + 
} + + var typ uint64 + if isRestart { + typ = uint64(types.RestartMessage) + } else { + typ = uint64(types.NewMessage) + } + + return &TransferRequest1_1{ + MessageType: typ, + Pull: isPull, + VoucherPtr: &vnode, + SelectorPtr: &selector, + BaseCidPtr: &baseCid, + VoucherTypeIdentifier: vtype, + TransferId: uint64(id), + }, nil +} + +// RestartExistingChannelRequest creates a request to ask the other side to restart an existing channel +func RestartExistingChannelRequest(channelId datatransfer2.ChannelID) datatransfer2.Request { + return &TransferRequest1_1{ + MessageType: uint64(types.RestartExistingChannelRequestMessage), + RestartChannel: channelId, + } +} + +// CancelRequest request generates a request to cancel an in progress request +func CancelRequest(id datatransfer2.TransferID) datatransfer2.Request { + return &TransferRequest1_1{ + MessageType: uint64(types.CancelMessage), + TransferId: uint64(id), + } +} + +// UpdateRequest generates a new request update +func UpdateRequest(id datatransfer2.TransferID, isPaused bool) datatransfer2.Request { + return &TransferRequest1_1{ + MessageType: uint64(types.UpdateMessage), + Pause: isPaused, + TransferId: uint64(id), + } +} + +// VoucherRequest generates a new request for the data transfer protocol +func VoucherRequest(id datatransfer2.TransferID, vtype datatransfer2.TypeIdentifier, voucher encoding.Encodable) (datatransfer2.Request, error) { + vnode, err := encoding.EncodeToNode(voucher) + if err != nil { + return nil, xerrors.Errorf("Creating request: %w", err) + } + return &TransferRequest1_1{ + MessageType: uint64(types.VoucherMessage), + VoucherPtr: &vnode, + VoucherTypeIdentifier: vtype, + TransferId: uint64(id), + }, nil +} + +// RestartResponse builds a new Data Transfer response +func RestartResponse(id datatransfer2.TransferID, accepted bool, isPaused bool, voucherResultType datatransfer2.TypeIdentifier, voucherResult encoding.Encodable) (datatransfer2.Response, error) { + vnode, err := encoding.EncodeToNode(voucherResult) + if err != nil { + return nil, xerrors.Errorf("Creating request: %w", err) + } + return &TransferResponse1_1{ + RequestAccepted: accepted, + MessageType: uint64(types.RestartMessage), + Paused: isPaused, + TransferId: uint64(id), + VoucherTypeIdentifier: voucherResultType, + VoucherResultPtr: &vnode, + }, nil +} + +// NewResponse builds a new Data Transfer response +func NewResponse(id datatransfer2.TransferID, accepted bool, isPaused bool, voucherResultType datatransfer2.TypeIdentifier, voucherResult encoding.Encodable) (datatransfer2.Response, error) { + vnode, err := encoding.EncodeToNode(voucherResult) + if err != nil { + return nil, xerrors.Errorf("Creating request: %w", err) + } + return &TransferResponse1_1{ + RequestAccepted: accepted, + MessageType: uint64(types.NewMessage), + Paused: isPaused, + TransferId: uint64(id), + VoucherTypeIdentifier: voucherResultType, + VoucherResultPtr: &vnode, + }, nil +} + +// VoucherResultResponse builds a new response for a voucher result +func VoucherResultResponse(id datatransfer2.TransferID, accepted bool, isPaused bool, voucherResultType datatransfer2.TypeIdentifier, voucherResult encoding.Encodable) (datatransfer2.Response, error) { + vnode, err := encoding.EncodeToNode(voucherResult) + if err != nil { + return nil, xerrors.Errorf("Creating request: %w", err) + } + return &TransferResponse1_1{ + RequestAccepted: accepted, + MessageType: uint64(types.VoucherResultMessage), + Paused: isPaused, + TransferId: uint64(id), + VoucherTypeIdentifier: voucherResultType, + 
VoucherResultPtr: &vnode, + }, nil +} + +// UpdateResponse returns a new update response +func UpdateResponse(id datatransfer2.TransferID, isPaused bool) datatransfer2.Response { + return &TransferResponse1_1{ + MessageType: uint64(types.UpdateMessage), + Paused: isPaused, + TransferId: uint64(id), + } +} + +// CancelResponse makes a new cancel response message +func CancelResponse(id datatransfer2.TransferID) datatransfer2.Response { + return &TransferResponse1_1{ + MessageType: uint64(types.CancelMessage), + TransferId: uint64(id), + } +} + +// CompleteResponse returns a new complete response message +func CompleteResponse(id datatransfer2.TransferID, isAccepted bool, isPaused bool, voucherResultType datatransfer2.TypeIdentifier, voucherResult encoding.Encodable) (datatransfer2.Response, error) { + vnode, err := encoding.EncodeToNode(voucherResult) + if err != nil { + return nil, xerrors.Errorf("Creating request: %w", err) + } + return &TransferResponse1_1{ + MessageType: uint64(types.CompleteMessage), + RequestAccepted: isAccepted, + Paused: isPaused, + VoucherTypeIdentifier: voucherResultType, + VoucherResultPtr: &vnode, + TransferId: uint64(id), + }, nil +} + +// FromNet can read a network stream to deserialize a GraphSyncMessage +func FromNet(r io.Reader) (datatransfer2.Message, error) { + builder := Prototype.TransferMessage.Representation().NewBuilder() + err := dagcbor.Decode(builder, r) + if err != nil { + return nil, err + } + node := builder.Build() + tresp := bindnode.Unwrap(node).(*TransferMessage1_1) + + if (tresp.IsRequest && tresp.Request == nil) || (!tresp.IsRequest && tresp.Response == nil) { + return nil, xerrors.Errorf("invalid/malformed message") + } + + if tresp.IsRequest { + return tresp.Request, nil + } + return tresp.Response, nil +} + +// FromNet can read a network stream to deserialize a GraphSyncMessage +func FromIPLD(node datamodel.Node) (datatransfer2.Message, error) { + if tn, ok := node.(schema.TypedNode); ok { // shouldn't need this if from Graphsync + node = tn.Representation() + } + builder := Prototype.TransferMessage.Representation().NewBuilder() + err := builder.AssignNode(node) + if err != nil { + return nil, err + } + tresp := bindnode.Unwrap(builder.Build()).(*TransferMessage1_1) + if (tresp.IsRequest && tresp.Request == nil) || (!tresp.IsRequest && tresp.Response == nil) { + return nil, xerrors.Errorf("invalid/malformed message") + } + + if tresp.IsRequest { + return tresp.Request, nil + } + return tresp.Response, nil +} diff --git a/datatransfer/message/message1_1prime/schema.go b/datatransfer/message/message1_1prime/schema.go new file mode 100644 index 000000000..c779b1fc9 --- /dev/null +++ b/datatransfer/message/message1_1prime/schema.go @@ -0,0 +1,29 @@ +package message1_1 + +import ( + _ "embed" + + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/node/bindnode" + "github.com/ipld/go-ipld-prime/schema" +) + +//go:embed schema.ipldsch +var embedSchema []byte + +var Prototype struct { + TransferMessage schema.TypedPrototype + TransferRequest schema.TypedPrototype + TransferResponse schema.TypedPrototype +} + +func init() { + ts, err := ipld.LoadSchemaBytes(embedSchema) + if err != nil { + panic(err) + } + + Prototype.TransferMessage = bindnode.Prototype((*TransferMessage1_1)(nil), ts.TypeByName("TransferMessage")) + Prototype.TransferRequest = bindnode.Prototype((*TransferRequest1_1)(nil), ts.TypeByName("TransferRequest")) + Prototype.TransferResponse = bindnode.Prototype((*TransferResponse1_1)(nil), 
ts.TypeByName("TransferResponse")) +} diff --git a/datatransfer/message/message1_1prime/schema.ipldsch b/datatransfer/message/message1_1prime/schema.ipldsch new file mode 100644 index 000000000..714135141 --- /dev/null +++ b/datatransfer/message/message1_1prime/schema.ipldsch @@ -0,0 +1,37 @@ +type PeerID string # peer.ID, really should be bytes (this is non-utf8) but is string for backward compat +type TransferID int +type TypeIdentifier string + +type ChannelID struct { + Initiator PeerID + Responder PeerID + ID TransferID +} representation tuple + +type TransferRequest struct { + BaseCidPtr nullable Link (rename "BCid") + MessageType Int (rename "Type") + Pause Bool (rename "Paus") + Partial Bool (rename "Part") + Pull Bool (rename "Pull") + SelectorPtr nullable Any (rename "Stor") + VoucherPtr nullable Any (rename "Vouch") + VoucherTypeIdentifier TypeIdentifier (rename "VTyp") + TransferId Int (rename "XferID") + RestartChannel ChannelID +} + +type TransferResponse struct { + MessageType Int (rename "Type") + RequestAccepted Bool (rename "Acpt") + Paused Bool (rename "Paus") + TransferId Int (rename "XferID") + VoucherResultPtr nullable Any (rename "VRes") + VoucherTypeIdentifier TypeIdentifier (rename "VTyp") +} + +type TransferMessage struct { + IsRequest Bool (rename "IsRq") + Request nullable TransferRequest + Response nullable TransferResponse +} diff --git a/datatransfer/message/message1_1prime/transfer_message.go b/datatransfer/message/message1_1prime/transfer_message.go new file mode 100644 index 000000000..f66b9f2d3 --- /dev/null +++ b/datatransfer/message/message1_1prime/transfer_message.go @@ -0,0 +1,43 @@ +package message1_1 + +import ( + "io" + + "github.com/filecoin-project/boost/datatransfer" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + "github.com/ipld/go-ipld-prime/datamodel" + "github.com/ipld/go-ipld-prime/node/bindnode" + "github.com/ipld/go-ipld-prime/schema" +) + +// TransferMessage1_1 is the transfer message for the 1.1 Data Transfer Protocol. +type TransferMessage1_1 struct { + IsRequest bool + + Request *TransferRequest1_1 + Response *TransferResponse1_1 +} + +// ========= datatransfer.Message interface + +// TransferID returns the TransferID of this message +func (tm *TransferMessage1_1) TransferID() datatransfer.TransferID { + if tm.IsRequest { + return tm.Request.TransferID() + } + return tm.Response.TransferID() +} + +func (tm *TransferMessage1_1) toIPLD() schema.TypedNode { + return bindnode.Wrap(tm, Prototype.TransferMessage.Type()) +} + +// ToNet serializes a transfer message type. +func (tm *TransferMessage1_1) ToIPLD() (datamodel.Node, error) { + return tm.toIPLD().Representation(), nil +} + +// ToNet serializes a transfer message type. 
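+// A minimal round-trip sketch (assuming msg is a datatransfer.Message produced by one
+// of this package's constructors, e.g. NewRequest):
+//
+//	var buf bytes.Buffer
+//	// ToNet dag-cbor encodes the bindnode representation of the message
+//	if err := msg.ToNet(&buf); err != nil {
+//		return err
+//	}
+//	decoded, err := FromNet(&buf) // yields the wrapped Request or Response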
+func (tm *TransferMessage1_1) ToNet(w io.Writer) error { + return dagcbor.Encode(tm.toIPLD().Representation(), w) +} diff --git a/datatransfer/message/message1_1prime/transfer_request.go b/datatransfer/message/message1_1prime/transfer_request.go new file mode 100644 index 000000000..7675a6b37 --- /dev/null +++ b/datatransfer/message/message1_1prime/transfer_request.go @@ -0,0 +1,146 @@ +package message1_1 + +import ( + "io" + + datatransfer2 "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/datatransfer/encoding" + "github.com/filecoin-project/boost/datatransfer/message/types" + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + "github.com/ipld/go-ipld-prime/datamodel" + "github.com/ipld/go-ipld-prime/schema" + "github.com/libp2p/go-libp2p/core/protocol" + "golang.org/x/xerrors" +) + +// TransferRequest1_1 is a struct for the 1.1 Data Transfer Protocol that fulfills the datatransfer.Request interface. +// its members are exported to be used by cbor-gen +type TransferRequest1_1 struct { + BaseCidPtr *cid.Cid + MessageType uint64 + Pause bool + Partial bool + Pull bool + SelectorPtr *datamodel.Node + VoucherPtr *datamodel.Node + VoucherTypeIdentifier datatransfer2.TypeIdentifier + TransferId uint64 + RestartChannel datatransfer2.ChannelID +} + +func (trq *TransferRequest1_1) MessageForProtocol(targetProtocol protocol.ID) (datatransfer2.Message, error) { + switch targetProtocol { + case datatransfer2.ProtocolDataTransfer1_2: + return trq, nil + default: + return nil, xerrors.Errorf("protocol not supported") + } +} + +// IsRequest always returns true in this case because this is a transfer request +func (trq *TransferRequest1_1) IsRequest() bool { + return true +} + +func (trq *TransferRequest1_1) IsRestart() bool { + return trq.MessageType == uint64(types.RestartMessage) +} + +func (trq *TransferRequest1_1) IsRestartExistingChannelRequest() bool { + return trq.MessageType == uint64(types.RestartExistingChannelRequestMessage) +} + +func (trq *TransferRequest1_1) RestartChannelId() (datatransfer2.ChannelID, error) { + if !trq.IsRestartExistingChannelRequest() { + return datatransfer2.ChannelID{}, xerrors.New("not a restart request") + } + return trq.RestartChannel, nil +} + +func (trq *TransferRequest1_1) IsNew() bool { + return trq.MessageType == uint64(types.NewMessage) +} + +func (trq *TransferRequest1_1) IsUpdate() bool { + return trq.MessageType == uint64(types.UpdateMessage) +} + +func (trq *TransferRequest1_1) IsVoucher() bool { + return trq.MessageType == uint64(types.VoucherMessage) || trq.MessageType == uint64(types.NewMessage) +} + +func (trq *TransferRequest1_1) IsPaused() bool { + return trq.Pause +} + +func (trq *TransferRequest1_1) TransferID() datatransfer2.TransferID { + return datatransfer2.TransferID(trq.TransferId) +} + +// ========= datatransfer.Request interface +// IsPull returns true if this is a data pull request +func (trq *TransferRequest1_1) IsPull() bool { + return trq.Pull +} + +// VoucherType returns the Voucher ID +func (trq *TransferRequest1_1) VoucherType() datatransfer2.TypeIdentifier { + return trq.VoucherTypeIdentifier +} + +// Voucher returns the Voucher bytes +func (trq *TransferRequest1_1) Voucher(decoder encoding.Decoder) (encoding.Encodable, error) { + if trq.VoucherPtr == nil { + return nil, xerrors.New("No voucher present to read") + } + return decoder.DecodeFromNode(*trq.VoucherPtr) +} + +func (trq *TransferRequest1_1) EmptyVoucher() bool { + return trq.VoucherTypeIdentifier == 
datatransfer2.EmptyTypeIdentifier +} + +// BaseCid returns the Base CID +func (trq *TransferRequest1_1) BaseCid() cid.Cid { + if trq.BaseCidPtr == nil { + return cid.Undef + } + return *trq.BaseCidPtr +} + +// Selector returns the message Selector bytes +func (trq *TransferRequest1_1) Selector() (datamodel.Node, error) { + if trq.SelectorPtr == nil { + return nil, xerrors.New("No selector present to read") + } + return *trq.SelectorPtr, nil +} + +// IsCancel returns true if this is a cancel request +func (trq *TransferRequest1_1) IsCancel() bool { + return trq.MessageType == uint64(types.CancelMessage) +} + +// IsPartial returns true if this is a partial request +func (trq *TransferRequest1_1) IsPartial() bool { + return trq.Partial +} + +func (trsp *TransferRequest1_1) toIPLD() schema.TypedNode { + msg := TransferMessage1_1{ + IsRequest: true, + Request: trsp, + Response: nil, + } + return msg.toIPLD() +} + +func (trq *TransferRequest1_1) ToIPLD() (datamodel.Node, error) { + return trq.toIPLD().Representation(), nil +} + +// ToNet serializes a transfer request. +func (trq *TransferRequest1_1) ToNet(w io.Writer) error { + return dagcbor.Encode(trq.toIPLD().Representation(), w) +} diff --git a/datatransfer/message/message1_1prime/transfer_response.go b/datatransfer/message/message1_1prime/transfer_response.go new file mode 100644 index 000000000..2b416b60a --- /dev/null +++ b/datatransfer/message/message1_1prime/transfer_response.go @@ -0,0 +1,115 @@ +package message1_1 + +import ( + "io" + + datatransfer2 "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/datatransfer/encoding" + "github.com/filecoin-project/boost/datatransfer/message/types" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + "github.com/ipld/go-ipld-prime/datamodel" + "github.com/ipld/go-ipld-prime/schema" + "github.com/libp2p/go-libp2p/core/protocol" + "golang.org/x/xerrors" +) + +// TransferResponse1_1 is a private struct that satisfies the datatransfer.Response interface +// It is the response message for the Data Transfer 1.1 and 1.2 Protocol. 
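+// On the wire the field names are rewritten by schema.ipldsch ("Type", "Acpt", "Paus",
+// "XferID", "VRes", "VTyp"), so the dag-cbor encoding keeps the same map keys as the
+// cbor-gen TransferResponse1_1 in datatransfer/message/message1_1.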
+type TransferResponse1_1 struct { + MessageType uint64 + RequestAccepted bool + Paused bool + TransferId uint64 + VoucherResultPtr *datamodel.Node + VoucherTypeIdentifier datatransfer2.TypeIdentifier +} + +func (trsp *TransferResponse1_1) TransferID() datatransfer2.TransferID { + return datatransfer2.TransferID(trsp.TransferId) +} + +// IsRequest always returns false in this case because this is a transfer response +func (trsp *TransferResponse1_1) IsRequest() bool { + return false +} + +// IsNew returns true if this is the first response sent +func (trsp *TransferResponse1_1) IsNew() bool { + return trsp.MessageType == uint64(types.NewMessage) +} + +// IsUpdate returns true if this response is an update +func (trsp *TransferResponse1_1) IsUpdate() bool { + return trsp.MessageType == uint64(types.UpdateMessage) +} + +// IsPaused returns true if the responder is paused +func (trsp *TransferResponse1_1) IsPaused() bool { + return trsp.Paused +} + +// IsCancel returns true if the responder has cancelled this response +func (trsp *TransferResponse1_1) IsCancel() bool { + return trsp.MessageType == uint64(types.CancelMessage) +} + +// IsComplete returns true if the responder has completed this response +func (trsp *TransferResponse1_1) IsComplete() bool { + return trsp.MessageType == uint64(types.CompleteMessage) +} + +func (trsp *TransferResponse1_1) IsVoucherResult() bool { + return trsp.MessageType == uint64(types.VoucherResultMessage) || trsp.MessageType == uint64(types.NewMessage) || trsp.MessageType == uint64(types.CompleteMessage) || + trsp.MessageType == uint64(types.RestartMessage) +} + +// Accepted returns true if the request is accepted in the response +func (trsp *TransferResponse1_1) Accepted() bool { + return trsp.RequestAccepted +} + +func (trsp *TransferResponse1_1) VoucherResultType() datatransfer2.TypeIdentifier { + return trsp.VoucherTypeIdentifier +} + +func (trsp *TransferResponse1_1) VoucherResult(decoder encoding.Decoder) (encoding.Encodable, error) { + if trsp.VoucherResultPtr == nil { + return nil, xerrors.New("No voucher present to read") + } + return decoder.DecodeFromNode(*trsp.VoucherResultPtr) +} + +func (trq *TransferResponse1_1) IsRestart() bool { + return trq.MessageType == uint64(types.RestartMessage) +} + +func (trsp *TransferResponse1_1) EmptyVoucherResult() bool { + return trsp.VoucherTypeIdentifier == datatransfer2.EmptyTypeIdentifier +} + +func (trsp *TransferResponse1_1) MessageForProtocol(targetProtocol protocol.ID) (datatransfer2.Message, error) { + switch targetProtocol { + case datatransfer2.ProtocolDataTransfer1_2: + return trsp, nil + default: + return nil, xerrors.Errorf("protocol %s not supported", targetProtocol) + } +} + +func (trsp *TransferResponse1_1) toIPLD() schema.TypedNode { + msg := TransferMessage1_1{ + IsRequest: false, + Request: nil, + Response: trsp, + } + return msg.toIPLD() +} + +func (trsp *TransferResponse1_1) ToIPLD() (datamodel.Node, error) { + return trsp.toIPLD().Representation(), nil +} + +// ToNet serializes a transfer response. 
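+// The message is written to w as dag-cbor, encoded from its IPLD representation.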
+func (trsp *TransferResponse1_1) ToNet(w io.Writer) error { + return dagcbor.Encode(trsp.toIPLD().Representation(), w) +} diff --git a/datatransfer/message/types/message_types.go b/datatransfer/message/types/message_types.go new file mode 100644 index 000000000..3144df0a2 --- /dev/null +++ b/datatransfer/message/types/message_types.go @@ -0,0 +1,16 @@ +package types + +type MessageType uint64 + +// Always append at the end to avoid breaking backward compatibility for cbor messages +const ( + NewMessage MessageType = iota + UpdateMessage + CancelMessage + CompleteMessage + VoucherMessage + VoucherResultMessage + + RestartMessage + RestartExistingChannelRequestMessage +) diff --git a/datatransfer/network/interface.go b/datatransfer/network/interface.go new file mode 100644 index 000000000..39d9af908 --- /dev/null +++ b/datatransfer/network/interface.go @@ -0,0 +1,57 @@ +package network + +import ( + "context" + + "github.com/filecoin-project/boost/datatransfer" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" +) + +// DataTransferNetwork provides network connectivity for GraphSync. +type DataTransferNetwork interface { + Protect(id peer.ID, tag string) + Unprotect(id peer.ID, tag string) bool + + // SendMessage sends a GraphSync message to a peer. + SendMessage( + context.Context, + peer.ID, + datatransfer.Message) error + + // SetDelegate registers the Reciver to handle messages received from the + // network. + SetDelegate(Receiver) + + // ConnectTo establishes a connection to the given peer + ConnectTo(context.Context, peer.ID) error + + // ConnectWithRetry establishes a connection to the given peer, retrying if + // necessary, and opens a stream on the data-transfer protocol to verify + // the peer will accept messages on the protocol + ConnectWithRetry(ctx context.Context, p peer.ID) error + + // ID returns the peer id of this libp2p host + ID() peer.ID + + // Protocol returns the protocol version of the peer, connecting to + // the peer if necessary + Protocol(context.Context, peer.ID) (protocol.ID, error) +} + +// Receiver is an interface for receiving messages from the GraphSyncNetwork. 
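+// A single Receiver is registered on the DataTransferNetwork via SetDelegate and is invoked for each decoded inbound message.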
+type Receiver interface { + ReceiveRequest( + ctx context.Context, + sender peer.ID, + incoming datatransfer.Request) + + ReceiveResponse( + ctx context.Context, + sender peer.ID, + incoming datatransfer.Response) + + ReceiveRestartExistingChannelRequest(ctx context.Context, sender peer.ID, incoming datatransfer.Request) + + ReceiveError(error) +} diff --git a/datatransfer/network/libp2p_impl.go b/datatransfer/network/libp2p_impl.go new file mode 100644 index 000000000..1db400993 --- /dev/null +++ b/datatransfer/network/libp2p_impl.go @@ -0,0 +1,355 @@ +package network + +import ( + "context" + "fmt" + "io" + "time" + + "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/datatransfer/message" + logging "github.com/ipfs/go-log/v2" + "github.com/jpillora/backoff" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" + "golang.org/x/xerrors" +) + +var log = logging.Logger("data_transfer_network") + +// The maximum amount of time to wait to open a stream +const defaultOpenStreamTimeout = 10 * time.Second + +// The maximum time to wait for a message to be sent +var defaultSendMessageTimeout = 10 * time.Second + +// The max number of attempts to open a stream +const defaultMaxStreamOpenAttempts = 5 + +// The min backoff time between retries +const defaultMinAttemptDuration = 1 * time.Second + +// The max backoff time between retries +const defaultMaxAttemptDuration = 5 * time.Minute + +// The multiplier in the backoff time for each retry +const defaultBackoffFactor = 5 + +var defaultDataTransferProtocols = []protocol.ID{ + datatransfer.ProtocolDataTransfer1_2, +} + +// Option is an option for configuring the libp2p storage market network +type Option func(*libp2pDataTransferNetwork) + +// DataTransferProtocols OVERWRITES the default libp2p protocols we use for data transfer with the given protocols. +func DataTransferProtocols(protocols []protocol.ID) Option { + return func(impl *libp2pDataTransferNetwork) { + impl.setDataTransferProtocols(protocols) + } +} + +// SendMessageParameters changes the default parameters around sending messages +func SendMessageParameters(openStreamTimeout time.Duration, sendMessageTimeout time.Duration) Option { + return func(impl *libp2pDataTransferNetwork) { + impl.sendMessageTimeout = sendMessageTimeout + impl.openStreamTimeout = openStreamTimeout + } +} + +// RetryParameters changes the default parameters around connection reopening +func RetryParameters(minDuration time.Duration, maxDuration time.Duration, attempts float64, backoffFactor float64) Option { + return func(impl *libp2pDataTransferNetwork) { + impl.maxStreamOpenAttempts = attempts + impl.minAttemptDuration = minDuration + impl.maxAttemptDuration = maxDuration + impl.backoffFactor = backoffFactor + } +} + +// NewFromLibp2pHost returns a GraphSyncNetwork supported by underlying Libp2p host. 
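+// The returned network uses the default stream-open and send timeouts and retry/backoff parameters unless they are overridden by the supplied Options.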
+func NewFromLibp2pHost(host host.Host, options ...Option) DataTransferNetwork { + dataTransferNetwork := libp2pDataTransferNetwork{ + host: host, + + openStreamTimeout: defaultOpenStreamTimeout, + sendMessageTimeout: defaultSendMessageTimeout, + maxStreamOpenAttempts: defaultMaxStreamOpenAttempts, + minAttemptDuration: defaultMinAttemptDuration, + maxAttemptDuration: defaultMaxAttemptDuration, + backoffFactor: defaultBackoffFactor, + } + dataTransferNetwork.setDataTransferProtocols(defaultDataTransferProtocols) + + for _, option := range options { + option(&dataTransferNetwork) + } + + return &dataTransferNetwork +} + +// libp2pDataTransferNetwork transforms the libp2p host interface, which sends and receives +// NetMessage objects, into the data transfer network interface. +type libp2pDataTransferNetwork struct { + host host.Host + // inbound messages from the network are forwarded to the receiver + receiver Receiver + + openStreamTimeout time.Duration + sendMessageTimeout time.Duration + maxStreamOpenAttempts float64 + minAttemptDuration time.Duration + maxAttemptDuration time.Duration + dtProtocols []protocol.ID + dtProtocolStrings []string + backoffFactor float64 +} + +func (impl *libp2pDataTransferNetwork) openStream(ctx context.Context, id peer.ID, protocols ...protocol.ID) (network.Stream, error) { + b := &backoff.Backoff{ + Min: impl.minAttemptDuration, + Max: impl.maxAttemptDuration, + Factor: impl.backoffFactor, + Jitter: true, + } + + start := time.Now() + for { + tctx, cancel := context.WithTimeout(ctx, impl.openStreamTimeout) + defer cancel() + + // will use the first among the given protocols that the remote peer supports + at := time.Now() + s, err := impl.host.NewStream(tctx, id, protocols...) + if err == nil { + nAttempts := b.Attempt() + 1 + if b.Attempt() > 0 { + log.Debugf("opened stream to %s on attempt %g of %g after %s", + id, nAttempts, impl.maxStreamOpenAttempts, time.Since(start)) + } + + return s, err + } + + // b.Attempt() starts from zero + nAttempts := b.Attempt() + 1 + if nAttempts >= impl.maxStreamOpenAttempts { + return nil, xerrors.Errorf("exhausted %g attempts but failed to open stream to %s, err: %w", impl.maxStreamOpenAttempts, id, err) + } + + d := b.Duration() + log.Warnf("failed to open stream to %s on attempt %g of %g after %s, waiting %s to try again, err: %s", + id, nAttempts, impl.maxStreamOpenAttempts, time.Since(at), d, err) + + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(d): + } + } +} + +func (dtnet *libp2pDataTransferNetwork) SendMessage( + ctx context.Context, + p peer.ID, + outgoing datatransfer.Message) error { + + ctx, span := otel.Tracer("data-transfer").Start(ctx, "sendMessage", trace.WithAttributes( + attribute.String("to", p.String()), + attribute.Int64("transferID", int64(outgoing.TransferID())), + attribute.Bool("isRequest", outgoing.IsRequest()), + attribute.Bool("isNew", outgoing.IsNew()), + attribute.Bool("isRestart", outgoing.IsRestart()), + attribute.Bool("isUpdate", outgoing.IsUpdate()), + attribute.Bool("isCancel", outgoing.IsCancel()), + attribute.Bool("isPaused", outgoing.IsPaused()), + )) + + defer span.End() + s, err := dtnet.openStream(ctx, p, dtnet.dtProtocols...) 
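+	// openStream already retries internally with exponential backoff, so a failure here is final.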
+ if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + return err + } + + outgoing, err = outgoing.MessageForProtocol(s.Protocol()) + if err != nil { + err = xerrors.Errorf("failed to convert message for protocol: %w", err) + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + return err + } + + if err = dtnet.msgToStream(ctx, s, outgoing); err != nil { + if err2 := s.Reset(); err2 != nil { + log.Error(err) + span.RecordError(err2) + span.SetStatus(codes.Error, err2.Error()) + return err2 + } + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + return err + } + + return s.Close() +} + +func (dtnet *libp2pDataTransferNetwork) SetDelegate(r Receiver) { + dtnet.receiver = r + for _, p := range dtnet.dtProtocols { + dtnet.host.SetStreamHandler(p, dtnet.handleNewStream) + } +} + +func (dtnet *libp2pDataTransferNetwork) ConnectTo(ctx context.Context, p peer.ID) error { + return dtnet.host.Connect(ctx, peer.AddrInfo{ID: p}) +} + +// ConnectWithRetry establishes a connection to the given peer, retrying if +// necessary, and opens a stream on the data-transfer protocol to verify +// the peer will accept messages on the protocol +func (dtnet *libp2pDataTransferNetwork) ConnectWithRetry(ctx context.Context, p peer.ID) error { + // Open a stream over the data-transfer protocol, to make sure that the + // peer is listening on the protocol + s, err := dtnet.openStream(ctx, p, dtnet.dtProtocols...) + if err != nil { + return err + } + + // We don't actually use the stream, we just open it to verify it's + // possible to connect over the data-transfer protocol, so we close it here + return s.Close() +} + +// handleNewStream receives a new stream from the network. +func (dtnet *libp2pDataTransferNetwork) handleNewStream(s network.Stream) { + defer s.Close() // nolint: errcheck,gosec + + if dtnet.receiver == nil { + s.Reset() // nolint: errcheck,gosec + return + } + + p := s.Conn().RemotePeer() + for { + var received datatransfer.Message + var err error + switch s.Protocol() { + case datatransfer.ProtocolDataTransfer1_2: + received, err = message.FromNet(s) + } + + if err != nil { + if err != io.EOF && err != io.ErrUnexpectedEOF { + s.Reset() // nolint: errcheck,gosec + go dtnet.receiver.ReceiveError(err) + log.Debugf("net handleNewStream from %s error: %s", p, err) + } + return + } + + ctx := context.Background() + log.Debugf("net handleNewStream from %s", p) + + if received.IsRequest() { + receivedRequest, ok := received.(datatransfer.Request) + if ok { + if receivedRequest.IsRestartExistingChannelRequest() { + dtnet.receiver.ReceiveRestartExistingChannelRequest(ctx, p, receivedRequest) + } else { + dtnet.receiver.ReceiveRequest(ctx, p, receivedRequest) + } + } + } else { + receivedResponse, ok := received.(datatransfer.Response) + if ok { + dtnet.receiver.ReceiveResponse(ctx, p, receivedResponse) + } + } + } +} + +func (dtnet *libp2pDataTransferNetwork) ID() peer.ID { + return dtnet.host.ID() +} + +func (dtnet *libp2pDataTransferNetwork) Protect(id peer.ID, tag string) { + dtnet.host.ConnManager().Protect(id, tag) +} + +func (dtnet *libp2pDataTransferNetwork) Unprotect(id peer.ID, tag string) bool { + return dtnet.host.ConnManager().Unprotect(id, tag) +} + +func (dtnet *libp2pDataTransferNetwork) msgToStream(ctx context.Context, s network.Stream, msg datatransfer.Message) error { + if msg.IsRequest() { + log.Debugf("Outgoing request message for transfer ID: %d", msg.TransferID()) + } + + deadline := time.Now().Add(dtnet.sendMessageTimeout) + 
if dl, ok := ctx.Deadline(); ok { + deadline = dl + } + if err := s.SetWriteDeadline(deadline); err != nil { + log.Warnf("error setting deadline: %s", err) + } + defer func() { + if err := s.SetWriteDeadline(time.Time{}); err != nil { + log.Warnf("error resetting deadline: %s", err) + } + }() + + switch s.Protocol() { + case datatransfer.ProtocolDataTransfer1_2: + default: + return fmt.Errorf("unrecognized protocol on remote: %s", s.Protocol()) + } + + if err := msg.ToNet(s); err != nil { + log.Debugf("error: %s", err) + return err + } + + return nil +} + +func (impl *libp2pDataTransferNetwork) Protocol(ctx context.Context, id peer.ID) (protocol.ID, error) { + // Check the cache for the peer's protocol version + firstProto, err := impl.host.Peerstore().FirstSupportedProtocol(id, impl.dtProtocols...) + if err != nil { + return "", err + } + + if firstProto != "" { + return protocol.ID(firstProto), nil + } + + // The peer's protocol version is not in the cache, so connect to the peer. + // Note that when the stream is opened, the peer's protocol will be added + // to the cache. + s, err := impl.openStream(ctx, id, impl.dtProtocols...) + if err != nil { + return "", err + } + _ = s.Close() + + return s.Protocol(), nil +} + +func (impl *libp2pDataTransferNetwork) setDataTransferProtocols(protocols []protocol.ID) { + impl.dtProtocols = append([]protocol.ID{}, protocols...) + + // Keep a string version of the protocols for performance reasons + impl.dtProtocolStrings = make([]string, 0, len(impl.dtProtocols)) + for _, proto := range impl.dtProtocols { + impl.dtProtocolStrings = append(impl.dtProtocolStrings, string(proto)) + } +} diff --git a/datatransfer/registry/registry.go b/datatransfer/registry/registry.go new file mode 100644 index 000000000..2cdc7ff0d --- /dev/null +++ b/datatransfer/registry/registry.go @@ -0,0 +1,81 @@ +package registry + +import ( + "sync" + + "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/datatransfer/encoding" + "golang.org/x/xerrors" +) + +// Processor is an interface that processes a certain type of encodable objects +// in a registry. 
The actual specifics of the interface that must be satisfied are
+// left to the user of the registry
+type Processor interface{}
+
+type registryEntry struct {
+	decoder   encoding.Decoder
+	processor Processor
+}
+
+// Registry maintains a register of types of encodable objects and a corresponding
+// processor for those objects
+// The encodable types must have a method Type() that specifies an identifier
+// so the correct decoding function and processor can be identified based
+// on this unique identifier
+type Registry struct {
+	registryLk sync.RWMutex
+	entries    map[datatransfer.TypeIdentifier]registryEntry
+}
+
+// NewRegistry initializes a new registry
+func NewRegistry() *Registry {
+	return &Registry{
+		entries: make(map[datatransfer.TypeIdentifier]registryEntry),
+	}
+}
+
+// Register registers the given processor for the given entry type
+func (r *Registry) Register(entry datatransfer.Registerable, processor Processor) error {
+	identifier := entry.Type()
+	decoder, err := encoding.NewDecoder(entry)
+	if err != nil {
+		return xerrors.Errorf("registering entry type %s: %w", identifier, err)
+	}
+	r.registryLk.Lock()
+	defer r.registryLk.Unlock()
+	if _, ok := r.entries[identifier]; ok {
+		return xerrors.Errorf("identifier already registered: %s", identifier)
+	}
+	r.entries[identifier] = registryEntry{decoder, processor}
+	return nil
+}
+
+// Decoder gets a decoder for the given identifier
+func (r *Registry) Decoder(identifier datatransfer.TypeIdentifier) (encoding.Decoder, bool) {
+	r.registryLk.RLock()
+	entry, has := r.entries[identifier]
+	r.registryLk.RUnlock()
+	return entry.decoder, has
+}
+
+// Processor gets the processing interface for the given identifier
+func (r *Registry) Processor(identifier datatransfer.TypeIdentifier) (Processor, bool) {
+	r.registryLk.RLock()
+	entry, has := r.entries[identifier]
+	r.registryLk.RUnlock()
+	return entry.processor, has
+}
+
+// Each iterates through all of the entries in this registry
+func (r *Registry) Each(process func(datatransfer.TypeIdentifier, encoding.Decoder, Processor) error) error {
+	r.registryLk.RLock()
+	defer r.registryLk.RUnlock()
+	for identifier, entry := range r.entries {
+		err := process(identifier, entry.decoder, entry.processor)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/datatransfer/testutil/testutils.go b/datatransfer/testutil/testutils.go
new file mode 100644
index 000000000..d4606fc24
--- /dev/null
+++ b/datatransfer/testutil/testutils.go
@@ -0,0 +1,24 @@
+package testutil
+
+import (
+	"testing"
+
+	"github.com/filecoin-project/boost/datatransfer"
+	"github.com/stretchr/testify/require"
+	"golang.org/x/net/context"
+)
+
+// StartAndWaitForReady is a utility function to start a module and verify it reaches the ready state
+func StartAndWaitForReady(ctx context.Context, t *testing.T, manager datatransfer.Manager) {
+	ready := make(chan error, 1)
+	manager.OnReady(func(err error) {
+		ready <- err
+	})
+	require.NoError(t, manager.Start(ctx))
+	select {
+	case <-ctx.Done():
+		t.Fatal("did not finish starting up module")
+	case err := <-ready:
+		require.NoError(t, err)
+	}
+}
diff --git a/datatransfer/tracing/tracing.go b/datatransfer/tracing/tracing.go
new file mode 100644
index 000000000..d43be30b7
--- /dev/null
+++ b/datatransfer/tracing/tracing.go
@@ -0,0 +1,64 @@
+package tracing
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+
+	"github.com/filecoin-project/boost/datatransfer"
+)
+
+type 
SpansIndex struct { + spansLk sync.RWMutex + spans map[datatransfer.ChannelID]trace.Span +} + +func NewSpansIndex() *SpansIndex { + return &SpansIndex{ + spans: make(map[datatransfer.ChannelID]trace.Span), + } +} + +func (si *SpansIndex) SpanForChannel(ctx context.Context, chid datatransfer.ChannelID) (context.Context, trace.Span) { + si.spansLk.RLock() + span, ok := si.spans[chid] + si.spansLk.RUnlock() + if ok { + return trace.ContextWithSpan(ctx, span), span + } + si.spansLk.Lock() + defer si.spansLk.Unlock() + // need to recheck under the write lock + span, ok = si.spans[chid] + if ok { + return trace.ContextWithSpan(ctx, span), span + } + ctx, span = otel.Tracer("data-transfer").Start(ctx, "transfer", trace.WithAttributes( + attribute.String("channelID", chid.String()), + )) + si.spans[chid] = span + return ctx, span +} + +func (si *SpansIndex) EndChannelSpan(chid datatransfer.ChannelID) { + si.spansLk.Lock() + defer si.spansLk.Unlock() + span, ok := si.spans[chid] + if ok { + span.End() + delete(si.spans, chid) + } +} + +func (si *SpansIndex) EndAll() { + si.spansLk.Lock() + defer si.spansLk.Unlock() + for _, span := range si.spans { + span.End() + } + // reset in case someone continues to use the span index + si.spans = make(map[datatransfer.ChannelID]trace.Span) +} diff --git a/datatransfer/transport/graphsync/extension/gsextension.go b/datatransfer/transport/graphsync/extension/gsextension.go new file mode 100644 index 000000000..3c49cf86a --- /dev/null +++ b/datatransfer/transport/graphsync/extension/gsextension.go @@ -0,0 +1,83 @@ +package extension + +import ( + "errors" + + "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/datatransfer/message" + "github.com/ipld/go-ipld-prime/datamodel" + "github.com/libp2p/go-libp2p/core/protocol" + + graphsync "github.com/filecoin-project/boost-graphsync" +) + +const ( + // ExtensionIncomingRequest1_1 is the identifier for data sent by the IncomingRequest hook + ExtensionIncomingRequest1_1 = graphsync.ExtensionName("fil/data-transfer/incoming-request/1.1") + // ExtensionOutgoingBlock1_1 is the identifier for data sent by the OutgoingBlock hook + ExtensionOutgoingBlock1_1 = graphsync.ExtensionName("fil/data-transfer/outgoing-block/1.1") + // ExtensionDataTransfer1_1 is the identifier for the v1.1 data transfer extension to graphsync + ExtensionDataTransfer1_1 = graphsync.ExtensionName("fil/data-transfer/1.1") +) + +// ProtocolMap maps graphsync extensions to their libp2p protocols +var ProtocolMap = map[graphsync.ExtensionName]protocol.ID{ + ExtensionIncomingRequest1_1: datatransfer.ProtocolDataTransfer1_2, + ExtensionOutgoingBlock1_1: datatransfer.ProtocolDataTransfer1_2, + ExtensionDataTransfer1_1: datatransfer.ProtocolDataTransfer1_2, +} + +// ToExtensionData converts a message to a graphsync extension +func ToExtensionData(msg datatransfer.Message, supportedExtensions []graphsync.ExtensionName) ([]graphsync.ExtensionData, error) { + exts := make([]graphsync.ExtensionData, 0, len(supportedExtensions)) + for _, supportedExtension := range supportedExtensions { + protoID, ok := ProtocolMap[supportedExtension] + if !ok { + return nil, errors.New("unsupported protocol") + } + versionedMsg, err := msg.MessageForProtocol(protoID) + if err != nil { + continue + } + nd, err := versionedMsg.ToIPLD() + if err != nil { + return nil, err + } + exts = append(exts, graphsync.ExtensionData{ + Name: supportedExtension, + Data: nd, + }) + } + if len(exts) == 0 { + return nil, errors.New("message not encodable in any 
supported extensions") + } + return exts, nil +} + +// GsExtended is a small interface used by GetTransferData +type GsExtended interface { + Extension(name graphsync.ExtensionName) (datamodel.Node, bool) +} + +// GetTransferData unmarshals extension data. +// Returns: +// - nil + nil if the extension is not found +// - nil + error if the extendedData fails to unmarshal +// - unmarshaled ExtensionDataTransferData + nil if all goes well +func GetTransferData(extendedData GsExtended, extNames []graphsync.ExtensionName) (datatransfer.Message, error) { + for _, name := range extNames { + data, ok := extendedData.Extension(name) + if ok { + return decoders[name](data) + } + } + return nil, nil +} + +type decoder func(datamodel.Node) (datatransfer.Message, error) + +var decoders = map[graphsync.ExtensionName]decoder{ + ExtensionIncomingRequest1_1: message.FromIPLD, + ExtensionOutgoingBlock1_1: message.FromIPLD, + ExtensionDataTransfer1_1: message.FromIPLD, +} diff --git a/datatransfer/transport/graphsync/graphsync.go b/datatransfer/transport/graphsync/graphsync.go new file mode 100644 index 000000000..50b2d88b7 --- /dev/null +++ b/datatransfer/transport/graphsync/graphsync.go @@ -0,0 +1,1312 @@ +package graphsync + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + datatransfer2 "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/datatransfer/transport/graphsync/extension" + logging "github.com/ipfs/go-log/v2" + "github.com/ipld/go-ipld-prime" + "github.com/libp2p/go-libp2p/core/peer" + "golang.org/x/sync/errgroup" + "golang.org/x/xerrors" + + graphsync "github.com/filecoin-project/boost-graphsync" + "github.com/filecoin-project/boost-graphsync/donotsendfirstblocks" +) + +var log = logging.Logger("dt_graphsync") + +// When restarting a data transfer, we cancel the existing graphsync request +// before opening a new one. +// This constant defines the maximum time to wait for the request to be +// cancelled. 
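+// If the cancel has not completed within this window, the transport stops waiting and proceeds (see waitForCompleteHook).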
+const maxGSCancelWait = time.Second + +var defaultSupportedExtensions = []graphsync.ExtensionName{ + extension.ExtensionDataTransfer1_1, +} + +var incomingReqExtensions = []graphsync.ExtensionName{ + extension.ExtensionIncomingRequest1_1, + extension.ExtensionDataTransfer1_1, +} + +var outgoingBlkExtensions = []graphsync.ExtensionName{ + extension.ExtensionOutgoingBlock1_1, + extension.ExtensionDataTransfer1_1, +} + +// Option is an option for setting up the graphsync transport +type Option func(*Transport) + +// SupportedExtensions sets what data transfer extensions are supported +func SupportedExtensions(supportedExtensions []graphsync.ExtensionName) Option { + return func(t *Transport) { + t.supportedExtensions = supportedExtensions + } +} + +// RegisterCompletedRequestListener is used by the tests +func RegisterCompletedRequestListener(l func(channelID datatransfer2.ChannelID)) Option { + return func(t *Transport) { + t.completedRequestListener = l + } +} + +// RegisterCompletedResponseListener is used by the tests +func RegisterCompletedResponseListener(l func(channelID datatransfer2.ChannelID)) Option { + return func(t *Transport) { + t.completedResponseListener = l + } +} + +// Transport manages graphsync hooks for data transfer, translating from +// graphsync hooks to semantic data transfer events +type Transport struct { + events datatransfer2.EventsHandler + gs graphsync.GraphExchange + peerID peer.ID + + supportedExtensions []graphsync.ExtensionName + unregisterFuncs []graphsync.UnregisterHookFunc + completedRequestListener func(channelID datatransfer2.ChannelID) + completedResponseListener func(channelID datatransfer2.ChannelID) + + // Map from data transfer channel ID to information about that channel + dtChannelsLk sync.RWMutex + dtChannels map[datatransfer2.ChannelID]*dtChannel + + // Used in graphsync callbacks to map from graphsync request to the + // associated data-transfer channel ID. + requestIDToChannelID *requestIDToChannelIDMap +} + +// NewTransport makes a new hooks manager with the given hook events interface +func NewTransport(peerID peer.ID, gs graphsync.GraphExchange, options ...Option) *Transport { + t := &Transport{ + gs: gs, + peerID: peerID, + supportedExtensions: defaultSupportedExtensions, + dtChannels: make(map[datatransfer2.ChannelID]*dtChannel), + requestIDToChannelID: newRequestIDToChannelIDMap(), + } + for _, option := range options { + option(t) + } + return t +} + +// OpenChannel initiates an outgoing request for the other peer to send data +// to us on this channel +// Note: from a data transfer symantic standpoint, it doesn't matter if the +// request is push or pull -- OpenChannel is called by the party that is +// intending to receive data +func (t *Transport) OpenChannel( + ctx context.Context, + dataSender peer.ID, + channelID datatransfer2.ChannelID, + root ipld.Link, + stor ipld.Node, + channel datatransfer2.ChannelState, + msg datatransfer2.Message, +) error { + if t.events == nil { + return datatransfer2.ErrHandlerNotSet + } + + exts, err := extension.ToExtensionData(msg, t.supportedExtensions) + if err != nil { + return err + } + // If this is a restart request, the client can indicate the blocks that + // it has already received, so that the provider knows not to resend + // those blocks + restartExts, err := t.getRestartExtension(ctx, dataSender, channel) + if err != nil { + return err + } + exts = append(exts, restartExts...) 
+ + // Start tracking the data-transfer channel + ch := t.trackDTChannel(channelID) + + // Open a graphsync request to the remote peer + req, err := ch.open(ctx, channelID, dataSender, root, stor, channel, exts) + if err != nil { + return err + } + + // Process incoming data + go t.executeGsRequest(req) + + return nil +} + +// Get the extension data for sending a Restart message, depending on the +// protocol version of the peer +func (t *Transport) getRestartExtension(ctx context.Context, p peer.ID, channel datatransfer2.ChannelState) ([]graphsync.ExtensionData, error) { + if channel == nil { + return nil, nil + } + return getDoNotSendFirstBlocksExtension(channel) +} + +// Skip the first N blocks because they were already received +func getDoNotSendFirstBlocksExtension(channel datatransfer2.ChannelState) ([]graphsync.ExtensionData, error) { + skipBlockCount := channel.ReceivedCidsTotal() + data := donotsendfirstblocks.EncodeDoNotSendFirstBlocks(skipBlockCount) + return []graphsync.ExtensionData{{ + Name: graphsync.ExtensionsDoNotSendFirstBlocks, + Data: data, + }}, nil +} + +// Read from the graphsync response and error channels until they are closed, +// and return the last error on the error channel +func (t *Transport) consumeResponses(req *gsReq) error { + var lastError error + for range req.responseChan { + } + log.Debugf("channel %s: finished consuming graphsync response channel", req.channelID) + + for err := range req.errChan { + lastError = err + } + log.Debugf("channel %s: finished consuming graphsync error channel", req.channelID) + + return lastError +} + +// Read from the graphsync response and error channels until they are closed +// or there is an error, then call the channel completed callback +func (t *Transport) executeGsRequest(req *gsReq) { + // Make sure to call the onComplete callback before returning + defer func() { + log.Infow("gs request complete for channel", "chid", req.channelID) + req.onComplete() + }() + + // Consume the response and error channels for the graphsync request + lastError := t.consumeResponses(req) + + // Request cancelled by client + if _, ok := lastError.(graphsync.RequestClientCancelledErr); ok { + terr := xerrors.Errorf("graphsync request cancelled") + log.Warnf("channel %s: %s", req.channelID, terr) + if err := t.events.OnRequestCancelled(req.channelID, terr); err != nil { + log.Error(err) + } + return + } + + // Request cancelled by responder + if _, ok := lastError.(graphsync.RequestCancelledErr); ok { + log.Infof("channel %s: graphsync request cancelled by responder", req.channelID) + // TODO Should we do anything for RequestCancelledErr ? 
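+		// For now we simply return without firing any further event for a responder-side cancel.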
+ return + } + + if lastError != nil { + log.Warnf("channel %s: graphsync error: %s", req.channelID, lastError) + } + + log.Debugf("channel %s: finished executing graphsync request", req.channelID) + + var completeErr error + if lastError != nil { + completeErr = xerrors.Errorf("channel %s: graphsync request failed to complete: %w", req.channelID, lastError) + } + + // Used by the tests to listen for when a request completes + if t.completedRequestListener != nil { + t.completedRequestListener(req.channelID) + } + + err := t.events.OnChannelCompleted(req.channelID, completeErr) + if err != nil { + log.Errorf("channel %s: processing OnChannelCompleted: %s", req.channelID, err) + } +} + +// PauseChannel pauses the given data-transfer channel +func (t *Transport) PauseChannel(ctx context.Context, chid datatransfer2.ChannelID) error { + ch, err := t.getDTChannel(chid) + if err != nil { + return err + } + return ch.pause(ctx) +} + +// ResumeChannel resumes the given data-transfer channel and sends the message +// if there is one +func (t *Transport) ResumeChannel( + ctx context.Context, + msg datatransfer2.Message, + chid datatransfer2.ChannelID, +) error { + ch, err := t.getDTChannel(chid) + if err != nil { + return err + } + return ch.resume(ctx, msg) +} + +// CloseChannel closes the given data-transfer channel +func (t *Transport) CloseChannel(ctx context.Context, chid datatransfer2.ChannelID) error { + ch, err := t.getDTChannel(chid) + if err != nil { + return err + } + + err = ch.close(ctx) + if err != nil { + return xerrors.Errorf("closing channel: %w", err) + } + return nil +} + +// CleanupChannel is called on the otherside of a cancel - removes any associated +// data for the channel +func (t *Transport) CleanupChannel(chid datatransfer2.ChannelID) { + t.dtChannelsLk.Lock() + + ch, ok := t.dtChannels[chid] + if ok { + // Remove the reference to the channel from the channels map + delete(t.dtChannels, chid) + } + + t.dtChannelsLk.Unlock() + + // Clean up the channel + if ok { + ch.cleanup() + } +} + +// SetEventHandler sets the handler for events on channels +func (t *Transport) SetEventHandler(events datatransfer2.EventsHandler) error { + if t.events != nil { + return datatransfer2.ErrHandlerAlreadySet + } + t.events = events + + t.unregisterFuncs = append(t.unregisterFuncs, t.gs.RegisterIncomingRequestQueuedHook(t.gsReqQueuedHook)) + t.unregisterFuncs = append(t.unregisterFuncs, t.gs.RegisterIncomingRequestHook(t.gsReqRecdHook)) + t.unregisterFuncs = append(t.unregisterFuncs, t.gs.RegisterCompletedResponseListener(t.gsCompletedResponseListener)) + t.unregisterFuncs = append(t.unregisterFuncs, t.gs.RegisterIncomingBlockHook(t.gsIncomingBlockHook)) + t.unregisterFuncs = append(t.unregisterFuncs, t.gs.RegisterOutgoingBlockHook(t.gsOutgoingBlockHook)) + t.unregisterFuncs = append(t.unregisterFuncs, t.gs.RegisterBlockSentListener(t.gsBlockSentHook)) + t.unregisterFuncs = append(t.unregisterFuncs, t.gs.RegisterOutgoingRequestHook(t.gsOutgoingRequestHook)) + t.unregisterFuncs = append(t.unregisterFuncs, t.gs.RegisterIncomingResponseHook(t.gsIncomingResponseHook)) + t.unregisterFuncs = append(t.unregisterFuncs, t.gs.RegisterRequestUpdatedHook(t.gsRequestUpdatedHook)) + t.unregisterFuncs = append(t.unregisterFuncs, t.gs.RegisterRequestorCancelledListener(t.gsRequestorCancelledListener)) + t.unregisterFuncs = append(t.unregisterFuncs, t.gs.RegisterNetworkErrorListener(t.gsNetworkSendErrorListener)) + t.unregisterFuncs = append(t.unregisterFuncs, 
t.gs.RegisterReceiverNetworkErrorListener(t.gsNetworkReceiveErrorListener)) + return nil +} + +// Shutdown disconnects a transport interface from graphsync +func (t *Transport) Shutdown(ctx context.Context) error { + for _, unregisterFunc := range t.unregisterFuncs { + unregisterFunc() + } + + t.dtChannelsLk.Lock() + defer t.dtChannelsLk.Unlock() + + var eg errgroup.Group + for _, ch := range t.dtChannels { + ch := ch + eg.Go(func() error { + return ch.shutdown(ctx) + }) + } + + err := eg.Wait() + if err != nil { + return xerrors.Errorf("shutting down graphsync transport: %w", err) + } + return nil +} + +// UseStore tells the graphsync transport to use the given loader and storer for this channelID +func (t *Transport) UseStore(channelID datatransfer2.ChannelID, lsys ipld.LinkSystem) error { + ch := t.trackDTChannel(channelID) + return ch.useStore(lsys) +} + +// ChannelGraphsyncRequests describes any graphsync request IDs associated with a given channel +type ChannelGraphsyncRequests struct { + // Current is the current request ID for the transfer + Current graphsync.RequestID + // Previous are ids of previous GraphSync requests in a transfer that + // has been restarted. We may be interested to know if these IDs are active + // on either side of the request + Previous []graphsync.RequestID +} + +// ChannelsForPeer describes current active channels for a given peer and their +// associated graphsync requests +type ChannelsForPeer struct { + SendingChannels map[datatransfer2.ChannelID]ChannelGraphsyncRequests + ReceivingChannels map[datatransfer2.ChannelID]ChannelGraphsyncRequests +} + +// ChannelsForPeer identifies which channels are open and which request IDs they map to +func (t *Transport) ChannelsForPeer(p peer.ID) ChannelsForPeer { + t.dtChannelsLk.RLock() + defer t.dtChannelsLk.RUnlock() + + // cannot have active transfers with self + if p == t.peerID { + return ChannelsForPeer{ + SendingChannels: map[datatransfer2.ChannelID]ChannelGraphsyncRequests{}, + ReceivingChannels: map[datatransfer2.ChannelID]ChannelGraphsyncRequests{}, + } + } + + sending := make(map[datatransfer2.ChannelID]ChannelGraphsyncRequests) + receiving := make(map[datatransfer2.ChannelID]ChannelGraphsyncRequests) + // loop through every graphsync request key we're currently tracking + t.requestIDToChannelID.forEach(func(requestID graphsync.RequestID, isSending bool, chid datatransfer2.ChannelID) { + // if the associated channel ID includes the requested peer + if chid.Initiator == p || chid.Responder == p { + // determine whether the requested peer is one at least one end of the channel + // and whether we're receving from that peer or sending to it + collection := sending + if !isSending { + collection = receiving + } + channelGraphsyncRequests := collection[chid] + // finally, determine if the request key matches the current GraphSync key we're tracking for + // this channel, indicating it's the current graphsync request + if t.dtChannels[chid] != nil && t.dtChannels[chid].requestID != nil && (*t.dtChannels[chid].requestID) == requestID { + channelGraphsyncRequests.Current = requestID + } else { + // otherwise this id was a previous graphsync request on a channel that was restarted + // and it has not been cleaned up yet + channelGraphsyncRequests.Previous = append(channelGraphsyncRequests.Previous, requestID) + } + collection[chid] = channelGraphsyncRequests + } + }) + return ChannelsForPeer{ + SendingChannels: sending, + ReceivingChannels: receiving, + } +} + +// gsOutgoingRequestHook is called when a graphsync 
request is made +func (t *Transport) gsOutgoingRequestHook(p peer.ID, request graphsync.RequestData, hookActions graphsync.OutgoingRequestHookActions) { + message, _ := extension.GetTransferData(request, t.supportedExtensions) + + // extension not found; probably not our request. + if message == nil { + return + } + + // A graphsync request is made when either + // - The local node opens a data-transfer pull channel, so the local node + // sends a graphsync request to ask the remote peer for the data + // - The remote peer opened a data-transfer push channel, and in response + // the local node sends a graphsync request to ask for the data + var initiator peer.ID + var responder peer.ID + if message.IsRequest() { + // This is a pull request so the data-transfer initiator is the local node + initiator = t.peerID + responder = p + } else { + // This is a push response so the data-transfer initiator is the remote + // peer: They opened the push channel, we respond by sending a + // graphsync request for the data + initiator = p + responder = t.peerID + } + chid := datatransfer2.ChannelID{Initiator: initiator, Responder: responder, ID: message.TransferID()} + + // A data transfer channel was opened + err := t.events.OnChannelOpened(chid) + if err != nil { + // There was an error opening the channel, bail out + log.Errorf("processing OnChannelOpened for %s: %s", chid, err) + t.CleanupChannel(chid) + return + } + + // Start tracking the channel if we're not already + ch := t.trackDTChannel(chid) + + // Signal that the channel has been opened + ch.gsReqOpened(request.ID(), hookActions) +} + +// gsIncomingBlockHook is called when a block is received +func (t *Transport) gsIncomingBlockHook(p peer.ID, response graphsync.ResponseData, block graphsync.BlockData, hookActions graphsync.IncomingBlockHookActions) { + chid, ok := t.requestIDToChannelID.load(response.RequestID()) + if !ok { + return + } + + err := t.events.OnDataReceived(chid, block.Link(), block.BlockSize(), block.Index(), block.BlockSizeOnWire() != 0) + if err != nil && err != datatransfer2.ErrPause { + hookActions.TerminateWithError(err) + return + } + + if err == datatransfer2.ErrPause { + hookActions.PauseRequest() + } +} + +func (t *Transport) gsBlockSentHook(p peer.ID, request graphsync.RequestData, block graphsync.BlockData) { + // When a data transfer is restarted, the requester sends a list of CIDs + // that it already has. Graphsync calls the sent hook for all blocks even + // if they are in the list (meaning, they aren't actually sent over the + // wire). So here we check if the block was actually sent + // over the wire before firing the data sent event. + if block.BlockSizeOnWire() == 0 { + return + } + + chid, ok := t.requestIDToChannelID.load(request.ID()) + if !ok { + return + } + + if err := t.events.OnDataSent(chid, block.Link(), block.BlockSize(), block.Index(), block.BlockSizeOnWire() != 0); err != nil { + log.Errorf("failed to process data sent: %+v", err) + } +} + +func (t *Transport) gsOutgoingBlockHook(p peer.ID, request graphsync.RequestData, block graphsync.BlockData, hookActions graphsync.OutgoingBlockHookActions) { + // When a data transfer is restarted, the requester sends a list of CIDs + // that it already has. Graphsync calls the outgoing block hook for all + // blocks even if they are in the list (meaning, they aren't actually going + // to be sent over the wire). So here we check if the block is actually + // going to be sent over the wire before firing the data queued event. 
+ if block.BlockSizeOnWire() == 0 { + return + } + + chid, ok := t.requestIDToChannelID.load(request.ID()) + if !ok { + return + } + + // OnDataQueued is called when a block is queued to be sent to the remote + // peer. It can return ErrPause to pause the response (eg if payment is + // required) and it can return a message that will be sent with the block + // (eg to ask for payment). + msg, err := t.events.OnDataQueued(chid, block.Link(), block.BlockSize(), block.Index(), block.BlockSizeOnWire() != 0) + if err != nil && err != datatransfer2.ErrPause { + hookActions.TerminateWithError(err) + return + } + + if err == datatransfer2.ErrPause { + hookActions.PauseResponse() + } + + if msg != nil { + // gsOutgoingBlockHook uses a unique extension name so it can be attached with data from a different hook + // outgoingBlkExtensions also includes the default extension name so it remains compatible with all data-transfer protocol versions out there + extensions, err := extension.ToExtensionData(msg, outgoingBlkExtensions) + if err != nil { + hookActions.TerminateWithError(err) + return + } + for _, extension := range extensions { + hookActions.SendExtensionData(extension) + } + } +} + +// gsReqQueuedHook is called when graphsync enqueues an incoming request for data +func (t *Transport) gsReqQueuedHook(p peer.ID, request graphsync.RequestData, hookActions graphsync.RequestQueuedHookActions) { + msg, err := extension.GetTransferData(request, t.supportedExtensions) + if err != nil { + log.Errorf("failed GetTransferData, req=%+v, err=%s", request, err) + } + // extension not found; probably not our request. + if msg == nil { + return + } + + var chid datatransfer2.ChannelID + if msg.IsRequest() { + // when a data transfer request comes in on graphsync, the remote peer + // initiated a pull + chid = datatransfer2.ChannelID{ID: msg.TransferID(), Initiator: p, Responder: t.peerID} + dtRequest := msg.(datatransfer2.Request) + if dtRequest.IsNew() { + log.Infof("%s, pull request queued, req_id=%d", chid, request.ID()) + t.events.OnTransferQueued(chid) + } else { + log.Infof("%s, pull restart request queued, req_id=%d", chid, request.ID()) + } + } else { + // when a data transfer response comes in on graphsync, this node + // initiated a push, and the remote peer responded with a request + // for data + chid = datatransfer2.ChannelID{ID: msg.TransferID(), Initiator: t.peerID, Responder: p} + response := msg.(datatransfer2.Response) + if response.IsNew() { + log.Infof("%s, GS pull request queued in response to our push, req_id=%d", chid, request.ID()) + t.events.OnTransferQueued(chid) + } else { + log.Infof("%s, GS pull request queued in response to our restart push, req_id=%d", chid, request.ID()) + } + } + augmentContext := t.events.OnContextAugment(chid) + if augmentContext != nil { + hookActions.AugmentContext(augmentContext) + } +} + +// gsReqRecdHook is called when graphsync receives an incoming request for data +func (t *Transport) gsReqRecdHook(p peer.ID, request graphsync.RequestData, hookActions graphsync.IncomingRequestHookActions) { + // if this is a push request the sender is us. + msg, err := extension.GetTransferData(request, t.supportedExtensions) + if err != nil { + hookActions.TerminateWithError(err) + return + } + + // extension not found; probably not our request. 
+ if msg == nil { + return + } + + // An incoming graphsync request for data is received when either + // - The remote peer opened a data-transfer pull channel, so the local node + // receives a graphsync request for the data + // - The local node opened a data-transfer push channel, and in response + // the remote peer sent a graphsync request for the data, and now the + // local node receives that request for data + var chid datatransfer2.ChannelID + var responseMessage datatransfer2.Message + var ch *dtChannel + if msg.IsRequest() { + // when a data transfer request comes in on graphsync, the remote peer + // initiated a pull + chid = datatransfer2.ChannelID{ID: msg.TransferID(), Initiator: p, Responder: t.peerID} + + log.Debugf("%s: received request for data (pull), req_id=%d", chid, request.ID()) + + // Lock the channel for the duration of this method + ch = t.trackDTChannel(chid) + ch.lk.Lock() + defer ch.lk.Unlock() + + request := msg.(datatransfer2.Request) + responseMessage, err = t.events.OnRequestReceived(chid, request) + } else { + // when a data transfer response comes in on graphsync, this node + // initiated a push, and the remote peer responded with a request + // for data + chid = datatransfer2.ChannelID{ID: msg.TransferID(), Initiator: t.peerID, Responder: p} + + log.Debugf("%s: received request for data (push), req_id=%d", chid, request.ID()) + + // Lock the channel for the duration of this method + ch = t.trackDTChannel(chid) + ch.lk.Lock() + defer ch.lk.Unlock() + + response := msg.(datatransfer2.Response) + err = t.events.OnResponseReceived(chid, response) + } + + // If we need to send a response, add the response message as an extension + if responseMessage != nil { + // gsReqRecdHook uses a unique extension name so it can be attached with data from a different hook + // incomingReqExtensions also includes default extension name so it remains compatible with previous data-transfer + // protocol versions out there. + extensions, extensionErr := extension.ToExtensionData(responseMessage, incomingReqExtensions) + if extensionErr != nil { + hookActions.TerminateWithError(err) + return + } + for _, extension := range extensions { + hookActions.SendExtensionData(extension) + } + } + + if err != nil && err != datatransfer2.ErrPause { + hookActions.TerminateWithError(err) + return + } + + // Check if the callback indicated that the channel should be paused + // immediately (eg because data is still being unsealed) + paused := false + if err == datatransfer2.ErrPause { + log.Debugf("%s: pausing graphsync response", chid) + + paused = true + hookActions.PauseResponse() + } + + // If this is a restart request, and the data transfer still hasn't got + // out of the paused state (eg because we're still unsealing), start this + // graphsync response in the paused state. + if ch.isOpen && !ch.xferStarted && !paused { + log.Debugf("%s: pausing graphsync response after restart", chid) + + paused = true + hookActions.PauseResponse() + } + + // If the transfer is not paused, record that the transfer has started + if !paused { + ch.xferStarted = true + } + + ch.gsDataRequestRcvd(request.ID(), hookActions) + + hookActions.ValidateRequest() +} + +// gsCompletedResponseListener is a graphsync.OnCompletedResponseListener. 
We use it to learn when the data transfer is complete
+// for the side that is responding to a graphsync request
+func (t *Transport) gsCompletedResponseListener(p peer.ID, request graphsync.RequestData, status graphsync.ResponseStatusCode) {
+	chid, ok := t.requestIDToChannelID.load(request.ID())
+	if !ok {
+		return
+	}
+
+	if status == graphsync.RequestCancelled {
+		return
+	}
+
+	var completeErr error
+	if status != graphsync.RequestCompletedFull {
+		statusStr := gsResponseStatusCodeString(status)
+		completeErr = xerrors.Errorf("graphsync response to peer %s did not complete: response status code %s", p, statusStr)
+	}
+
+	// Used by the tests to listen for when a response completes
+	if t.completedResponseListener != nil {
+		t.completedResponseListener(chid)
+	}
+
+	err := t.events.OnChannelCompleted(chid, completeErr)
+	if err != nil {
+		log.Error(err)
+	}
+}
+
+// Remove this map once this PR lands: https://github.com/ipfs/go-graphsync/pull/148
+var gsResponseStatusCodes = map[graphsync.ResponseStatusCode]string{
+	graphsync.RequestAcknowledged: "RequestAcknowledged",
+	graphsync.AdditionalPeers: "AdditionalPeers",
+	graphsync.NotEnoughGas: "NotEnoughGas",
+	graphsync.OtherProtocol: "OtherProtocol",
+	graphsync.PartialResponse: "PartialResponse",
+	graphsync.RequestPaused: "RequestPaused",
+	graphsync.RequestCompletedFull: "RequestCompletedFull",
+	graphsync.RequestCompletedPartial: "RequestCompletedPartial",
+	graphsync.RequestRejected: "RequestRejected",
+	graphsync.RequestFailedBusy: "RequestFailedBusy",
+	graphsync.RequestFailedUnknown: "RequestFailedUnknown",
+	graphsync.RequestFailedLegal: "RequestFailedLegal",
+	graphsync.RequestFailedContentNotFound: "RequestFailedContentNotFound",
+	graphsync.RequestCancelled: "RequestCancelled",
+}
+
+func gsResponseStatusCodeString(code graphsync.ResponseStatusCode) string {
+	str, ok := gsResponseStatusCodes[code]
+	if ok {
+		return str
+	}
+	return gsResponseStatusCodes[graphsync.RequestFailedUnknown]
+}
+
+func (t *Transport) gsRequestUpdatedHook(p peer.ID, request graphsync.RequestData, update graphsync.RequestData, hookActions graphsync.RequestUpdatedHookActions) {
+	chid, ok := t.requestIDToChannelID.load(request.ID())
+	if !ok {
+		return
+	}
+
+	responseMessage, err := t.processExtension(chid, update, p, t.supportedExtensions)
+
+	if responseMessage != nil {
+		extensions, extensionErr := extension.ToExtensionData(responseMessage, t.supportedExtensions)
+		if extensionErr != nil {
+			hookActions.TerminateWithError(err)
+			return
+		}
+		for _, extension := range extensions {
+			hookActions.SendExtensionData(extension)
+		}
+	}
+
+	if err != nil && err != datatransfer2.ErrPause {
+		hookActions.TerminateWithError(err)
+	}
+
+}
+
+// gsIncomingResponseHook is a graphsync.OnIncomingResponseHook. 
We use it to pass on responses +func (t *Transport) gsIncomingResponseHook(p peer.ID, response graphsync.ResponseData, hookActions graphsync.IncomingResponseHookActions) { + chid, ok := t.requestIDToChannelID.load(response.RequestID()) + if !ok { + return + } + + responseMessage, err := t.processExtension(chid, response, p, incomingReqExtensions) + + if responseMessage != nil { + extensions, extensionErr := extension.ToExtensionData(responseMessage, t.supportedExtensions) + if extensionErr != nil { + hookActions.TerminateWithError(err) + return + } + for _, extension := range extensions { + hookActions.UpdateRequestWithExtensions(extension) + } + } + + if err != nil { + hookActions.TerminateWithError(err) + } + + // In a case where the transfer sends blocks immediately this extension may contain both a + // response message and a revalidation request so we trigger OnResponseReceived again for this + // specific extension name + _, err = t.processExtension(chid, response, p, []graphsync.ExtensionName{extension.ExtensionOutgoingBlock1_1}) + + if err != nil { + hookActions.TerminateWithError(err) + } +} + +func (t *Transport) processExtension(chid datatransfer2.ChannelID, gsMsg extension.GsExtended, p peer.ID, exts []graphsync.ExtensionName) (datatransfer2.Message, error) { + + // if this is a push request the sender is us. + msg, err := extension.GetTransferData(gsMsg, exts) + if err != nil { + return nil, err + } + + // extension not found; probably not our request. + if msg == nil { + return nil, nil + } + + if msg.IsRequest() { + + // only accept request message updates when original message was also request + if (chid != datatransfer2.ChannelID{ID: msg.TransferID(), Initiator: p, Responder: t.peerID}) { + return nil, errors.New("received request on response channel") + } + dtRequest := msg.(datatransfer2.Request) + return t.events.OnRequestReceived(chid, dtRequest) + } + + // only accept response message updates when original message was also response + if (chid != datatransfer2.ChannelID{ID: msg.TransferID(), Initiator: t.peerID, Responder: p}) { + return nil, errors.New("received response on request channel") + } + + dtResponse := msg.(datatransfer2.Response) + return nil, t.events.OnResponseReceived(chid, dtResponse) +} + +func (t *Transport) gsRequestorCancelledListener(p peer.ID, request graphsync.RequestData) { + chid, ok := t.requestIDToChannelID.load(request.ID()) + if !ok { + return + } + + ch, err := t.getDTChannel(chid) + if err != nil { + if !xerrors.Is(datatransfer2.ErrChannelNotFound, err) { + log.Errorf("requestor cancelled: getting channel %s: %s", chid, err) + } + return + } + + log.Debugf("%s: requester cancelled data-transfer", chid) + ch.onRequesterCancelled() +} + +// Called when there is a graphsync error sending data +func (t *Transport) gsNetworkSendErrorListener(p peer.ID, request graphsync.RequestData, gserr error) { + // Fire an error if the graphsync request was made by this node or the remote peer + chid, ok := t.requestIDToChannelID.load(request.ID()) + if !ok { + return + } + + err := t.events.OnSendDataError(chid, gserr) + if err != nil { + log.Errorf("failed to fire transport send error %s: %s", gserr, err) + } +} + +// Called when there is a graphsync error receiving data +func (t *Transport) gsNetworkReceiveErrorListener(p peer.ID, gserr error) { + // Fire a receive data error on all ongoing graphsync transfers with that + // peer + t.requestIDToChannelID.forEach(func(k graphsync.RequestID, sending bool, chid datatransfer2.ChannelID) { + if 
chid.Initiator != p && chid.Responder != p { + return + } + + err := t.events.OnReceiveDataError(chid, gserr) + if err != nil { + log.Errorf("failed to fire transport receive error %s: %s", gserr, err) + } + }) +} + +func (t *Transport) newDTChannel(chid datatransfer2.ChannelID) *dtChannel { + return &dtChannel{ + t: t, + channelID: chid, + opened: make(chan graphsync.RequestID, 1), + } +} + +func (t *Transport) trackDTChannel(chid datatransfer2.ChannelID) *dtChannel { + t.dtChannelsLk.Lock() + defer t.dtChannelsLk.Unlock() + + ch, ok := t.dtChannels[chid] + if !ok { + ch = t.newDTChannel(chid) + t.dtChannels[chid] = ch + } + + return ch +} + +func (t *Transport) getDTChannel(chid datatransfer2.ChannelID) (*dtChannel, error) { + if t.events == nil { + return nil, datatransfer2.ErrHandlerNotSet + } + + t.dtChannelsLk.RLock() + defer t.dtChannelsLk.RUnlock() + + ch, ok := t.dtChannels[chid] + if !ok { + return nil, xerrors.Errorf("channel %s: %w", chid, datatransfer2.ErrChannelNotFound) + } + return ch, nil +} + +// Info needed to keep track of a data transfer channel +type dtChannel struct { + channelID datatransfer2.ChannelID + t *Transport + + lk sync.RWMutex + isOpen bool + requestID *graphsync.RequestID + completed chan struct{} + requesterCancelled bool + xferStarted bool + pendingExtensions []graphsync.ExtensionData + + opened chan graphsync.RequestID + + storeLk sync.RWMutex + storeRegistered bool +} + +// Info needed to monitor an ongoing graphsync request +type gsReq struct { + channelID datatransfer2.ChannelID + responseChan <-chan graphsync.ResponseProgress + errChan <-chan error + onComplete func() +} + +// Open a graphsync request for data to the remote peer +func (c *dtChannel) open( + ctx context.Context, + chid datatransfer2.ChannelID, + dataSender peer.ID, + root ipld.Link, + stor ipld.Node, + channel datatransfer2.ChannelState, + exts []graphsync.ExtensionData, +) (*gsReq, error) { + c.lk.Lock() + defer c.lk.Unlock() + + // If there is an existing graphsync request for this channelID + if c.requestID != nil { + // Cancel the existing graphsync request + completed := c.completed + errch := c.cancel(ctx) + + // Wait for the complete callback to be called + err := waitForCompleteHook(ctx, completed) + if err != nil { + return nil, xerrors.Errorf("%s: waiting for cancelled graphsync request to complete: %w", chid, err) + } + + // Wait for the cancel request method to complete + select { + case err = <-errch: + case <-ctx.Done(): + err = xerrors.Errorf("timed out waiting for graphsync request to be cancelled") + } + if err != nil { + return nil, xerrors.Errorf("%s: restarting graphsync request: %w", chid, err) + } + } + + // Set up a completed channel that will be closed when the request + // completes (or is cancelled) + completed := make(chan struct{}) + var onCompleteOnce sync.Once + onComplete := func() { + // Ensure the channel is only closed once + onCompleteOnce.Do(func() { + log.Debugw("closing the completion ch for data-transfer channel", "chid", chid) + close(completed) + }) + } + c.completed = completed + + // Open a new graphsync request + msg := fmt.Sprintf("Opening graphsync request to %s for root %s", dataSender, root) + if channel != nil { + msg += fmt.Sprintf(" with %d Blocks already received", channel.ReceivedCidsTotal()) + } + log.Info(msg) + responseChan, errChan := c.t.gs.Request(ctx, dataSender, root, stor, exts...) 
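+	// The request proceeds asynchronously; its response and error channels are consumed by executeGsRequest.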
+ + // Wait for graphsync "request opened" callback + select { + case <-ctx.Done(): + return nil, ctx.Err() + case requestID := <-c.opened: + // Mark the channel as open and save the Graphsync request key + c.isOpen = true + c.requestID = &requestID + } + + return &gsReq{ + channelID: chid, + responseChan: responseChan, + errChan: errChan, + onComplete: onComplete, + }, nil +} + +func waitForCompleteHook(ctx context.Context, completed chan struct{}) error { + // Wait for the cancel to propagate through to graphsync, and for + // the graphsync request to complete + select { + case <-completed: + return nil + case <-time.After(maxGSCancelWait): + // Fail-safe: give up waiting after a certain amount of time + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +// gsReqOpened is called when graphsync makes a request to the remote peer to ask for data +func (c *dtChannel) gsReqOpened(requestID graphsync.RequestID, hookActions graphsync.OutgoingRequestHookActions) { + // Tell graphsync to store the received blocks in the registered store + if c.hasStore() { + hookActions.UsePersistenceOption("data-transfer-" + c.channelID.String()) + } + log.Infow("outgoing graphsync request", "peer", c.channelID.OtherParty(c.t.peerID), "graphsync request id", requestID, "data transfer channel id", c.channelID) + // Save a mapping from the graphsync key to the channel ID so that + // subsequent graphsync callbacks are associated with this channel + c.t.requestIDToChannelID.set(requestID, false, c.channelID) + + c.opened <- requestID +} + +// gsDataRequestRcvd is called when the transport receives an incoming request +// for data. +// Note: Must be called under the lock. +func (c *dtChannel) gsDataRequestRcvd(requestID graphsync.RequestID, hookActions graphsync.IncomingRequestHookActions) { + log.Debugf("%s: received request for data, req_id=%d", c.channelID, requestID) + + // If the requester had previously cancelled their request, send any + // message that was queued since the cancel + if c.requesterCancelled { + c.requesterCancelled = false + + extensions := c.pendingExtensions + c.pendingExtensions = nil + for _, ext := range extensions { + hookActions.SendExtensionData(ext) + } + } + + // Tell graphsync to load blocks from the registered store + if c.hasStore() { + hookActions.UsePersistenceOption("data-transfer-" + c.channelID.String()) + } + + // Save a mapping from the graphsync key to the channel ID so that + // subsequent graphsync callbacks are associated with this channel + c.requestID = &requestID + log.Infow("incoming graphsync request", "peer", c.channelID.OtherParty(c.t.peerID), "graphsync request id", requestID, "data transfer channel id", c.channelID) + c.t.requestIDToChannelID.set(requestID, true, c.channelID) + + c.isOpen = true +} + +func (c *dtChannel) pause(ctx context.Context) error { + c.lk.Lock() + defer c.lk.Unlock() + + // Check if the channel was already cancelled + if c.requestID == nil { + log.Debugf("%s: channel was cancelled so not pausing channel", c.channelID) + return nil + } + + // If the requester cancelled, bail out + if c.requesterCancelled { + log.Debugf("%s: requester has cancelled so not pausing response", c.channelID) + return nil + } + + // Pause the response + log.Debugf("%s: pausing response", c.channelID) + return c.t.gs.Pause(ctx, *c.requestID) +} + +func (c *dtChannel) resume(ctx context.Context, msg datatransfer2.Message) error { + c.lk.Lock() + defer c.lk.Unlock() + + // Check if the channel was already cancelled + if c.requestID == nil { + 
log.Debugf("%s: channel was cancelled so not resuming channel", c.channelID) + return nil + } + + var extensions []graphsync.ExtensionData + if msg != nil { + var err error + extensions, err = extension.ToExtensionData(msg, c.t.supportedExtensions) + if err != nil { + return err + } + } + + // If the requester cancelled, bail out + if c.requesterCancelled { + // If there was an associated message, we still want to send it to the + // remote peer. We're not sending any message now, so instead queue up + // the message to be sent next time the peer makes a request to us. + c.pendingExtensions = append(c.pendingExtensions, extensions...) + + log.Debugf("%s: requester has cancelled so not unpausing response", c.channelID) + return nil + } + + // Record that the transfer has started + c.xferStarted = true + + log.Debugf("%s: unpausing response", c.channelID) + return c.t.gs.Unpause(ctx, *c.requestID, extensions...) +} + +func (c *dtChannel) close(ctx context.Context) error { + var errch chan error + c.lk.Lock() + { + // Check if the channel was already cancelled + if c.requestID != nil { + errch = c.cancel(ctx) + } + } + c.lk.Unlock() + + // Wait for the cancel message to complete + select { + case err := <-errch: + return err + case <-ctx.Done(): + return ctx.Err() + } +} + +// Called when the responder gets a cancel message from the requester +func (c *dtChannel) onRequesterCancelled() { + c.lk.Lock() + defer c.lk.Unlock() + + c.requesterCancelled = true +} + +func (c *dtChannel) hasStore() bool { + c.storeLk.RLock() + defer c.storeLk.RUnlock() + + return c.storeRegistered +} + +// Use the given loader and storer to get / put blocks for the data-transfer. +// Note that each data-transfer channel uses a separate blockstore. +func (c *dtChannel) useStore(lsys ipld.LinkSystem) error { + c.storeLk.Lock() + defer c.storeLk.Unlock() + + // Register the channel's store with graphsync + err := c.t.gs.RegisterPersistenceOption("data-transfer-"+c.channelID.String(), lsys) + if err != nil { + return err + } + + c.storeRegistered = true + + return nil +} + +func (c *dtChannel) cleanup() { + c.lk.Lock() + defer c.lk.Unlock() + + log.Debugf("%s: cleaning up channel", c.channelID) + + if c.hasStore() { + // Unregister the channel's store from graphsync + opt := "data-transfer-" + c.channelID.String() + err := c.t.gs.UnregisterPersistenceOption(opt) + if err != nil { + log.Errorf("failed to unregister persistence option %s: %s", opt, err) + } + } + + // Clean up mapping from gs key to channel ID + c.t.requestIDToChannelID.deleteRefs(c.channelID) +} + +func (c *dtChannel) shutdown(ctx context.Context) error { + // Cancel the graphsync request + c.lk.Lock() + errch := c.cancel(ctx) + c.lk.Unlock() + + // Wait for the cancel message to complete + select { + case err := <-errch: + return err + case <-ctx.Done(): + return ctx.Err() + } +} + +// Cancel the graphsync request. +// Note: must be called under the lock. 
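+// The returned channel is buffered and receives the outcome of the
+// asynchronous graphsync Cancel call (nil when there is nothing to cancel, or
+// when the request is no longer known to graphsync); callers such as close()
+// and shutdown() select on it together with ctx.Done().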
+func (c *dtChannel) cancel(ctx context.Context) chan error { + errch := make(chan error, 1) + + // Check that the request has not already been cancelled + if c.requesterCancelled || c.requestID == nil { + errch <- nil + return errch + } + + // Clear the graphsync key to indicate that the request has been cancelled + requestID := c.requestID + c.requestID = nil + + go func() { + log.Debugf("%s: cancelling request", c.channelID) + err := c.t.gs.Cancel(ctx, *requestID) + + // Ignore "request not found" errors + if err != nil && !xerrors.Is(graphsync.RequestNotFoundErr{}, err) { + errch <- xerrors.Errorf("cancelling graphsync request for channel %s: %w", c.channelID, err) + } else { + errch <- nil + } + }() + + return errch +} + +type channelInfo struct { + sending bool + channelID datatransfer2.ChannelID +} + +// Used in graphsync callbacks to map from graphsync request to the +// associated data-transfer channel ID. +type requestIDToChannelIDMap struct { + lk sync.RWMutex + m map[graphsync.RequestID]channelInfo +} + +func newRequestIDToChannelIDMap() *requestIDToChannelIDMap { + return &requestIDToChannelIDMap{ + m: make(map[graphsync.RequestID]channelInfo), + } +} + +// get the value for a key +func (m *requestIDToChannelIDMap) load(key graphsync.RequestID) (datatransfer2.ChannelID, bool) { + m.lk.RLock() + defer m.lk.RUnlock() + + val, ok := m.m[key] + return val.channelID, ok +} + +// get the value if any of the keys exists in the map +func (m *requestIDToChannelIDMap) any(ks ...graphsync.RequestID) (datatransfer2.ChannelID, bool) { + m.lk.RLock() + defer m.lk.RUnlock() + + for _, k := range ks { + val, ok := m.m[k] + if ok { + return val.channelID, ok + } + } + return datatransfer2.ChannelID{}, false +} + +// set the value for a key +func (m *requestIDToChannelIDMap) set(key graphsync.RequestID, sending bool, chid datatransfer2.ChannelID) { + m.lk.Lock() + defer m.lk.Unlock() + + m.m[key] = channelInfo{sending, chid} +} + +// call f for each key / value in the map +func (m *requestIDToChannelIDMap) forEach(f func(k graphsync.RequestID, isSending bool, chid datatransfer2.ChannelID)) { + m.lk.RLock() + defer m.lk.RUnlock() + + for k, ch := range m.m { + f(k, ch.sending, ch.channelID) + } +} + +// delete any keys that reference this value +func (m *requestIDToChannelIDMap) deleteRefs(id datatransfer2.ChannelID) { + m.lk.Lock() + defer m.lk.Unlock() + + for k, ch := range m.m { + if ch.channelID == id { + delete(m.m, k) + } + } +} diff --git a/datatransfer/types.go b/datatransfer/types.go new file mode 100644 index 000000000..6c9c6504c --- /dev/null +++ b/datatransfer/types.go @@ -0,0 +1,428 @@ +package datatransfer + +import ( + "fmt" + "time" + + "github.com/filecoin-project/boost/datatransfer/encoding" + "github.com/filecoin-project/go-statemachine/fsm" + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/datamodel" + "github.com/libp2p/go-libp2p/core/peer" + cbg "github.com/whyrusleeping/cbor-gen" +) + +//go:generate cbor-gen-for ChannelID ChannelStages ChannelStage Log + +// TypeIdentifier is a unique string identifier for a type of encodable object in a +// registry +type TypeIdentifier string + +// EmptyTypeIdentifier means there is no voucher present +const EmptyTypeIdentifier = TypeIdentifier("") + +// Voucher is used to validate +// a data transfer request against the underlying storage or retrieval deal +// that precipitated it. 
The only requirement is a voucher can read and write +// from bytes, and has a string identifier type +type Voucher Registerable + +// VoucherResult is used to provide option additional information about a +// voucher being rejected or accepted +type VoucherResult Registerable + +// TransferID is an identifier for a data transfer, shared between +// request/responder and unique to the requester +type TransferID uint64 + +// TypedVoucher is a voucher or voucher result in IPLD form and an associated +// type identifier for that voucher or voucher result +type TypedVoucher struct { + Voucher datamodel.Node + Type TypeIdentifier +} + +// Registerable is a type of object in a registry. It must be encodable and must +// have a single method that uniquely identifies its type +type Registerable interface { + encoding.Encodable + // Type is a unique string identifier for this voucher type + Type() TypeIdentifier +} + +// Equals is a utility to compare that two TypedVouchers are the same - both type +// and the voucher's IPLD content +func (tv1 TypedVoucher) Equals(tv2 TypedVoucher) bool { + return tv1.Type == tv2.Type && ipld.DeepEqual(tv1.Voucher, tv2.Voucher) +} + +// ChannelID is a unique identifier for a channel, distinct by both the other +// party's peer ID + the transfer ID +type ChannelID struct { + Initiator peer.ID + Responder peer.ID + ID TransferID +} + +func (c ChannelID) String() string { + return fmt.Sprintf("%s-%s-%d", c.Initiator, c.Responder, c.ID) +} + +// OtherParty returns the peer on the other side of the request, depending +// on whether this peer is the initiator or responder +func (c ChannelID) OtherParty(thisPeer peer.ID) peer.ID { + if thisPeer == c.Initiator { + return c.Responder + } + return c.Initiator +} + +// Channel represents all the parameters for a single data transfer +type Channel interface { + // TransferID returns the transfer id for this channel + TransferID() TransferID + + // BaseCID returns the CID that is at the root of this data transfer + BaseCID() cid.Cid + + // Selector returns the IPLD selector for this data transfer (represented as + // an IPLD node) + Selector() datamodel.Node + + // Voucher returns the initial voucher for this data transfer + Voucher() Voucher + + // Sender returns the peer id for the node that is sending data + Sender() peer.ID + + // Recipient returns the peer id for the node that is receiving data + Recipient() peer.ID + + // TotalSize returns the total size for the data being transferred + TotalSize() uint64 + + // IsPull returns whether this is a pull request + IsPull() bool + + // ChannelID returns the ChannelID for this request + ChannelID() ChannelID + + // OtherPeer returns the counter party peer for this channel + OtherPeer() peer.ID +} + +// ChannelState is channel parameters plus it's current state +type ChannelState interface { + Channel + + // SelfPeer returns the peer this channel belongs to + SelfPeer() peer.ID + + // Status is the current status of this channel + Status() Status + + // Sent returns the number of bytes sent + Sent() uint64 + + // Received returns the number of bytes received + Received() uint64 + + // Message offers additional information about the current status + Message() string + + // Vouchers returns all vouchers sent on this channel + Vouchers() []Voucher + + // VoucherResults are results of vouchers sent on the channel + VoucherResults() []VoucherResult + + // LastVoucher returns the last voucher sent on the channel + LastVoucher() Voucher + + // LastVoucherResult returns the last voucher 
result sent on the channel + LastVoucherResult() VoucherResult + + // ReceivedCidsTotal returns the number of (non-unique) cids received so far + // on the channel - note that a block can exist in more than one place in the DAG + ReceivedCidsTotal() int64 + + // QueuedCidsTotal returns the number of (non-unique) cids queued so far + // on the channel - note that a block can exist in more than one place in the DAG + QueuedCidsTotal() int64 + + // SentCidsTotal returns the number of (non-unique) cids sent so far + // on the channel - note that a block can exist in more than one place in the DAG + SentCidsTotal() int64 + + // Queued returns the number of bytes read from the node and queued for sending + Queued() uint64 + + // Stages returns the timeline of events this data transfer has gone through, + // for observability purposes. + // + // It is unsafe for the caller to modify the return value, and changes + // may not be persisted. It should be treated as immutable. + Stages() *ChannelStages +} + +// ChannelStages captures a timeline of the progress of a data transfer channel, +// grouped by stages. +// +// EXPERIMENTAL; subject to change. +type ChannelStages struct { + // Stages contains an entry for every stage the channel has gone through. + // Each stage then contains logs. + Stages []*ChannelStage +} + +// ChannelStage traces the execution of a data transfer channel stage. +// +// EXPERIMENTAL; subject to change. +type ChannelStage struct { + // Human-readable fields. + // TODO: these _will_ need to be converted to canonical representations, so + // they are machine readable. + Name string + Description string + + // Timestamps. + // TODO: may be worth adding an exit timestamp. It _could_ be inferred from + // the start of the next stage, or from the timestamp of the last log line + // if this is a terminal stage. But that's non-determistic and it relies on + // assumptions. + CreatedTime cbg.CborTime + UpdatedTime cbg.CborTime + + // Logs contains a detailed timeline of events that occurred inside + // this stage. + Logs []*Log +} + +// Log represents a point-in-time event that occurred inside a channel stage. +// +// EXPERIMENTAL; subject to change. +type Log struct { + // Log is a human readable message. + // + // TODO: this _may_ need to be converted to a canonical data model so it + // is machine-readable. + Log string + + UpdatedTime cbg.CborTime +} + +// AddLog adds a log to the specified stage, creating the stage if +// it doesn't exist yet. +// +// EXPERIMENTAL; subject to change. +func (cs *ChannelStages) AddLog(stage, msg string) { + if cs == nil { + return + } + + now := curTime() + st := cs.GetStage(stage) + if st == nil { + st = &ChannelStage{ + CreatedTime: now, + } + cs.Stages = append(cs.Stages, st) + } + + st.Name = stage + st.UpdatedTime = now + if msg != "" && (len(st.Logs) == 0 || st.Logs[len(st.Logs)-1].Log != msg) { + // only add the log if it's not a duplicate. + st.Logs = append(st.Logs, &Log{msg, now}) + } +} + +// GetStage returns the ChannelStage object for a named stage, or nil if not found. +// +// TODO: the input should be a strongly-typed enum instead of a free-form string. +// TODO: drop Get from GetStage to make this code more idiomatic. Return a +// +// second ok boolean to make it even more idiomatic. +// +// EXPERIMENTAL; subject to change. 
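+//
+// A minimal usage sketch (the stage name below is purely illustrative):
+//
+//	cs.AddLog("Transferring", "received another block")
+//	if st := cs.GetStage("Transferring"); st != nil {
+//		fmt.Println(st.Name, len(st.Logs))
+//	}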
+func (cs *ChannelStages) GetStage(stage string) *ChannelStage { + if cs == nil { + return nil + } + + for _, s := range cs.Stages { + if s.Name == stage { + return s + } + } + + return nil +} + +func curTime() cbg.CborTime { + now := time.Now() + return cbg.CborTime(time.Unix(0, now.UnixNano()).UTC()) +} + +// Status is the status of transfer for a given channel +type Status uint64 + +const ( + // Requested means a data transfer was requested but has not yet been approved + Requested Status = iota + + // Ongoing means the data transfer is in progress + Ongoing + + // TransferFinished indicates the initiator is done sending/receiving + // data but is awaiting confirmation from the responder + TransferFinished + + // ResponderCompleted indicates the initiator received a message from the + // responder that it's completed + ResponderCompleted + + // Finalizing means the responder is awaiting a final message from the initiator to + // consider the transfer done + Finalizing + + // Completing just means we have some final cleanup for a completed request + Completing + + // Completed means the data transfer is completed successfully + Completed + + // Failing just means we have some final cleanup for a failed request + Failing + + // Failed means the data transfer failed + Failed + + // Cancelling just means we have some final cleanup for a cancelled request + Cancelling + + // Cancelled means the data transfer ended prematurely + Cancelled + + // DEPRECATED: Use InitiatorPaused() method on ChannelState + InitiatorPaused + + // DEPRECATED: Use ResponderPaused() method on ChannelState + ResponderPaused + + // DEPRECATED: Use BothPaused() method on ChannelState + BothPaused + + // ResponderFinalizing is a unique state where the responder is awaiting a final voucher + ResponderFinalizing + + // ResponderFinalizingTransferFinished is a unique state where the responder is awaiting a final voucher + // and we have received all data + ResponderFinalizingTransferFinished + + // ChannelNotFoundError means the searched for data transfer does not exist + ChannelNotFoundError + + // Queued indicates a data transfer request has been accepted, but is not actively transferring yet + Queued + + // AwaitingAcceptance indicates a transfer request is actively being processed by the transport + // even if the remote has not yet responded that it's accepted the transfer. Such a state can + // occur, for example, in a requestor-initiated transfer that starts processing prior to receiving + // acceptance from the server.
+ AwaitingAcceptance +) + +type statusList []Status + +func (sl statusList) Contains(s Status) bool { + for _, ts := range sl { + if ts == s { + return true + } + } + return false +} + +func (sl statusList) AsFSMStates() []fsm.StateKey { + sk := make([]fsm.StateKey, 0, len(sl)) + for _, s := range sl { + sk = append(sk, s) + } + return sk +} + +var NotAcceptedStates = statusList{ + Requested, + AwaitingAcceptance, + Cancelled, + Cancelling, + Failed, + Failing, + ChannelNotFoundError} + +func (s Status) IsAccepted() bool { + return !NotAcceptedStates.Contains(s) +} +func (s Status) String() string { + return Statuses[s] +} + +var FinalizationStatuses = statusList{Finalizing, Completed, Completing} + +func (s Status) InFinalization() bool { + return FinalizationStatuses.Contains(s) +} + +var TransferCompleteStates = statusList{ + TransferFinished, + ResponderFinalizingTransferFinished, + Finalizing, + Completed, + Completing, + Failing, + Failed, + Cancelling, + Cancelled, + ChannelNotFoundError, +} + +func (s Status) TransferComplete() bool { + return TransferCompleteStates.Contains(s) +} + +var TransferringStates = statusList{ + Ongoing, + ResponderCompleted, + ResponderFinalizing, + AwaitingAcceptance, +} + +func (s Status) Transferring() bool { + return TransferringStates.Contains(s) +} + +// Statuses are human readable names for data transfer states +var Statuses = map[Status]string{ + // Requested means a data transfer was requested by has not yet been approved + Requested: "Requested", + Ongoing: "Ongoing", + TransferFinished: "TransferFinished", + ResponderCompleted: "ResponderCompleted", + Finalizing: "Finalizing", + Completing: "Completing", + Completed: "Completed", + Failing: "Failing", + Failed: "Failed", + Cancelling: "Cancelling", + Cancelled: "Cancelled", + InitiatorPaused: "InitiatorPaused", + ResponderPaused: "ResponderPaused", + BothPaused: "BothPaused", + ResponderFinalizing: "ResponderFinalizing", + ResponderFinalizingTransferFinished: "ResponderFinalizingTransferFinished", + ChannelNotFoundError: "ChannelNotFoundError", + Queued: "Queued", + AwaitingAcceptance: "AwaitingAcceptance", +} diff --git a/datatransfer/types_cbor_gen.go b/datatransfer/types_cbor_gen.go new file mode 100644 index 000000000..389c214e1 --- /dev/null +++ b/datatransfer/types_cbor_gen.go @@ -0,0 +1,447 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package datatransfer + +import ( + "fmt" + "io" + "math" + "sort" + + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +var lengthBufChannelID = []byte{131} + +func (t *ChannelID) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufChannelID); err != nil { + return err + } + + // t.Initiator (peer.ID) (string) + if len(t.Initiator) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Initiator was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Initiator))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Initiator)); err != nil { + return err + } + + // t.Responder (peer.ID) (string) + if len(t.Responder) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Responder was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Responder))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Responder)); err != nil { + return err + } + + // t.ID (datatransfer.TransferID) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ID)); err != nil { + return err + } + + return nil +} + +func (t *ChannelID) UnmarshalCBOR(r io.Reader) (err error) { + *t = ChannelID{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Initiator (peer.ID) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Initiator = peer.ID(sval) + } + // t.Responder (peer.ID) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Responder = peer.ID(sval) + } + // t.ID (datatransfer.TransferID) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.ID = TransferID(extra) + + } + return nil +} + +var lengthBufChannelStages = []byte{129} + +func (t *ChannelStages) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufChannelStages); err != nil { + return err + } + + // t.Stages ([]*datatransfer.ChannelStage) (slice) + if len(t.Stages) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Stages was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Stages))); err != nil { + return err + } + for _, v := range t.Stages { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + return nil +} + +func (t *ChannelStages) UnmarshalCBOR(r io.Reader) (err error) { + *t = ChannelStages{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Stages 
([]*datatransfer.ChannelStage) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Stages: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Stages = make([]*ChannelStage, extra) + } + + for i := 0; i < int(extra); i++ { + + var v ChannelStage + if err := v.UnmarshalCBOR(cr); err != nil { + return err + } + + t.Stages[i] = &v + } + + return nil +} + +var lengthBufChannelStage = []byte{133} + +func (t *ChannelStage) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufChannelStage); err != nil { + return err + } + + // t.Name (string) (string) + if len(t.Name) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Name was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Name))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Name)); err != nil { + return err + } + + // t.Description (string) (string) + if len(t.Description) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Description was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Description))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Description)); err != nil { + return err + } + + // t.CreatedTime (typegen.CborTime) (struct) + if err := t.CreatedTime.MarshalCBOR(cw); err != nil { + return err + } + + // t.UpdatedTime (typegen.CborTime) (struct) + if err := t.UpdatedTime.MarshalCBOR(cw); err != nil { + return err + } + + // t.Logs ([]*datatransfer.Log) (slice) + if len(t.Logs) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Logs was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Logs))); err != nil { + return err + } + for _, v := range t.Logs { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + return nil +} + +func (t *ChannelStage) UnmarshalCBOR(r io.Reader) (err error) { + *t = ChannelStage{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 5 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Name (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Name = string(sval) + } + // t.Description (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Description = string(sval) + } + // t.CreatedTime (typegen.CborTime) (struct) + + { + + if err := t.CreatedTime.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.CreatedTime: %w", err) + } + + } + // t.UpdatedTime (typegen.CborTime) (struct) + + { + + if err := t.UpdatedTime.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.UpdatedTime: %w", err) + } + + } + // t.Logs ([]*datatransfer.Log) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Logs: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Logs = make([]*Log, extra) + } + + for i := 0; i < int(extra); 
i++ { + + var v Log + if err := v.UnmarshalCBOR(cr); err != nil { + return err + } + + t.Logs[i] = &v + } + + return nil +} + +var lengthBufLog = []byte{130} + +func (t *Log) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufLog); err != nil { + return err + } + + // t.Log (string) (string) + if len(t.Log) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Log was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Log))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Log)); err != nil { + return err + } + + // t.UpdatedTime (typegen.CborTime) (struct) + if err := t.UpdatedTime.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *Log) UnmarshalCBOR(r io.Reader) (err error) { + *t = Log{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Log (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Log = string(sval) + } + // t.UpdatedTime (typegen.CborTime) (struct) + + { + + if err := t.UpdatedTime.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.UpdatedTime: %w", err) + } + + } + return nil +} diff --git a/db/migrations/20231005140947_create_ask.sql b/db/migrations/20231005140947_create_ask.sql new file mode 100644 index 000000000..5069a4d30 --- /dev/null +++ b/db/migrations/20231005140947_create_ask.sql @@ -0,0 +1,18 @@ +-- +goose Up +-- +goose StatementBegin +CREATE TABLE IF NOT EXISTS StorageAsk ( + Price INT, + VerifiedPrice INT, + MinPieceSize INT, + MaxPieceSize INT, + Miner Text, + TS INT, + Expiry INT, + SeqNo INT +); +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +DROP TABLE IF EXISTS StorageAsk; +-- +goose StatementEnd \ No newline at end of file diff --git a/db/storageask.go b/db/storageask.go new file mode 100644 index 000000000..243b71039 --- /dev/null +++ b/db/storageask.go @@ -0,0 +1,84 @@ +package db + +import ( + "context" + "database/sql" + "fmt" + + "github.com/filecoin-project/boost/storagemarket/types/legacytypes" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" +) + +type StorageAskDB struct { + db *sql.DB +} + +func NewStorageAskDB(db *sql.DB) *StorageAskDB { + return &StorageAskDB{db: db} +} + +func (s *StorageAskDB) Update(ctx context.Context, ask legacytypes.StorageAsk) error { + var minerString string + qry := "SELECT Miner FROM StorageAsk WHERE Miner=?;" + row := s.db.QueryRowContext(ctx, qry, ask.Miner.String()) + err := row.Scan(&minerString) + switch { + case err == sql.ErrNoRows: + return s.set(ctx, ask) + case err != nil: + return err + default: + return s.update(ctx, ask) + } + return nil +} + +func (s *StorageAskDB) set(ctx context.Context, ask legacytypes.StorageAsk) error { + qry := "INSERT INTO StorageAsk (Price, VerifiedPrice, MinPieceSize, MaxPieceSize, Miner, TS, Expiry, SeqNo) " + qry += "VALUES (?, ?, ?, ?, ?, ?, ?, ?)" + values := []interface{}{ask.Price, ask.VerifiedPrice, ask.MinPieceSize, ask.MaxPieceSize, ask.Miner.String(), ask.Timestamp, ask.Expiry, ask.SeqNo} + _, err := s.db.ExecContext(ctx, qry, values...)
+ return err +} + +func (s *StorageAskDB) update(ctx context.Context, ask legacytypes.StorageAsk) error { + qry := "UPDATE StorageAsk SET Price=?, VerifiedPrice=?, MinPieceSize=?, " + qry += "MaxPieceSize=?, TS=?, Expiry=?, SeqNo=? " + qry += "WHERE Miner=?" + values := []interface{}{ask.Price, ask.VerifiedPrice, ask.MinPieceSize, ask.MaxPieceSize, ask.Timestamp, ask.Expiry, ask.SeqNo, ask.Miner.String()} + _, err := s.db.ExecContext(ctx, qry, values...) + return err +} + +func (s *StorageAskDB) Get(ctx context.Context, miner address.Address) (legacytypes.StorageAsk, error) { + var price, verifiedPrice, timestamp, expiry int64 + var minPieceSize, maxPieceSize, seqNo uint64 + var minerS string + qry := "SELECT Price, VerifiedPrice, MinPieceSize, MaxPieceSize, Miner, TS, Expiry, SeqNo FROM StorageAsk WHERE Miner=?;" + row := s.db.QueryRowContext(ctx, qry, miner.String()) + err := row.Scan(&price, &verifiedPrice, &minPieceSize, &maxPieceSize, &minerS, &timestamp, &expiry, &seqNo) + if err != nil { + return legacytypes.StorageAsk{}, err + } + + m, err := address.NewFromString(minerS) + if err != nil { + return legacytypes.StorageAsk{}, fmt.Errorf("converting stored ask address: %w", err) + } + + if m != miner { + return legacytypes.StorageAsk{}, fmt.Errorf("stored miner address does not match the supplied address") + } + + return legacytypes.StorageAsk{ + Price: abi.NewTokenAmount(price), + VerifiedPrice: abi.NewTokenAmount(verifiedPrice), + Timestamp: abi.ChainEpoch(timestamp), + Expiry: abi.ChainEpoch(expiry), + Miner: miner, + MinPieceSize: abi.PaddedPieceSize(minPieceSize), + MaxPieceSize: abi.PaddedPieceSize(maxPieceSize), + SeqNo: seqNo, + }, nil +} diff --git a/fundmanager/fundmanager.go b/fundmanager/fundmanager.go index 78d336ff5..f27580bf0 100644 --- a/fundmanager/fundmanager.go +++ b/fundmanager/fundmanager.go @@ -5,8 +5,8 @@ import ( "errors" "fmt" - "github.com/filecoin-project/boost-gfm/storagemarket" "github.com/filecoin-project/boost/db" + "github.com/filecoin-project/boost/storagemarket/types/legacytypes" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" @@ -219,10 +219,10 @@ func (m *FundManager) MoveFundsToEscrow(ctx context.Context, amt abi.TokenAmount // BalanceMarket returns available and locked amounts in escrow // (on chain with the Storage Market Actor) -func (m *FundManager) BalanceMarket(ctx context.Context) (storagemarket.Balance, error) { +func (m *FundManager) BalanceMarket(ctx context.Context) (legacytypes.Balance, error) { bal, err := m.api.StateMarketBalance(ctx, m.cfg.StorageMiner, types.EmptyTSK) if err != nil { - return storagemarket.Balance{}, err + return legacytypes.Balance{}, err } return toSharedBalance(bal), nil @@ -248,8 +248,8 @@ func (m *FundManager) AddressPublishMsg() address.Address { return m.cfg.PubMsgWallet } -func toSharedBalance(bal api.MarketBalance) storagemarket.Balance { - return storagemarket.Balance{ +func toSharedBalance(bal api.MarketBalance) legacytypes.Balance { + return legacytypes.Balance{ Locked: bal.Locked, Available: big.Sub(bal.Escrow, bal.Locked), } diff --git a/go.mod b/go.mod index 9561ae297..0bdb3124e 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/BurntSushi/toml v1.3.2 github.com/alecthomas/jsonschema v0.0.0-20200530073317-71f438968921 github.com/benbjohnson/clock v1.3.5 - github.com/buger/goterm v1.0.3 + github.com/buger/goterm v1.0.3 // indirect github.com/chzyer/readline v1.5.1 github.com/davecgh/go-spew
v1.1.1 github.com/docker/go-units v0.5.0 @@ -45,26 +45,25 @@ require ( github.com/graph-gophers/graphql-go v1.3.0 github.com/graph-gophers/graphql-transport-ws v0.0.2 github.com/hashicorp/go-multierror v1.1.1 - github.com/hnlq715/golang-lru v0.4.0 github.com/ipfs/go-block-format v0.2.0 - github.com/ipfs/go-blockservice v0.5.1 // indirect + github.com/ipfs/go-blockservice v0.5.1 github.com/ipfs/go-cid v0.4.1 github.com/ipfs/go-cidutil v0.1.0 github.com/ipfs/go-datastore v0.6.0 github.com/ipfs/go-graphsync v0.14.10 github.com/ipfs/go-ipfs-blocksutil v0.0.1 - github.com/ipfs/go-ipfs-chunker v0.0.5 // indirect + github.com/ipfs/go-ipfs-chunker v0.0.5 github.com/ipfs/go-ipfs-ds-help v1.1.0 // indirect github.com/ipfs/go-ipfs-exchange-interface v0.2.0 // indirect - github.com/ipfs/go-ipfs-exchange-offline v0.3.0 // indirect + github.com/ipfs/go-ipfs-exchange-offline v0.3.0 github.com/ipfs/go-ipfs-files v0.3.0 // indirect github.com/ipfs/go-ipld-format v0.6.0 github.com/ipfs/go-ipld-legacy v0.2.1 github.com/ipfs/go-libipfs v0.7.0 // indirect github.com/ipfs/go-log/v2 v2.5.1 - github.com/ipfs/go-merkledag v0.11.0 // indirect + github.com/ipfs/go-merkledag v0.11.0 github.com/ipfs/go-metrics-interface v0.0.1 - github.com/ipfs/go-unixfs v0.4.5 // indirect + github.com/ipfs/go-unixfs v0.4.5 github.com/ipld/go-car v0.6.1 github.com/ipld/go-car/v2 v2.13.1 github.com/ipld/go-ipld-prime v0.21.0 @@ -86,7 +85,7 @@ require ( github.com/multiformats/go-multibase v0.2.0 github.com/multiformats/go-multicodec v0.9.0 github.com/multiformats/go-multihash v0.2.3 - github.com/multiformats/go-varint v0.0.7 // indirect + github.com/multiformats/go-varint v0.0.7 github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 github.com/pressly/goose/v3 v3.14.0 github.com/prometheus/client_golang v1.16.0 @@ -128,7 +127,7 @@ require ( github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect github.com/armon/go-metrics v0.3.9 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bep/debounce v1.2.1 // indirect + github.com/bep/debounce v1.2.1 github.com/boltdb/bolt v1.3.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect @@ -170,7 +169,7 @@ require ( github.com/filecoin-project/specs-actors/v3 v3.1.2 // indirect github.com/filecoin-project/specs-actors/v4 v4.0.2 // indirect github.com/filecoin-project/specs-actors/v5 v5.0.6 // indirect - github.com/filecoin-project/specs-actors/v6 v6.0.2 + github.com/filecoin-project/specs-actors/v6 v6.0.2 // indirect github.com/filecoin-project/specs-actors/v7 v7.0.1 // indirect github.com/filecoin-project/specs-actors/v8 v8.0.1 // indirect github.com/flynn/noise v1.0.0 // indirect @@ -276,7 +275,7 @@ require ( github.com/opencontainers/runtime-spec v1.1.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect - github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect + github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polydawn/refmt v0.89.0 // indirect @@ -298,7 +297,7 @@ require ( github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasttemplate v1.2.1 // indirect github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba // indirect - github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect + github.com/whyrusleeping/cbor 
v0.0.0-20171005072247-63513f603b11 github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect github.com/whyrusleeping/ledger-filecoin-go v0.9.1-0.20201010031517-c3dcc1bddce4 // indirect @@ -310,7 +309,7 @@ require ( go.uber.org/zap v1.25.0 go4.org v0.0.0-20230225012048-214862532bf5 // indirect golang.org/x/mod v0.12.0 // indirect - golang.org/x/net v0.14.0 // indirect + golang.org/x/net v0.14.0 golang.org/x/sys v0.11.0 // indirect golang.org/x/term v0.11.0 golang.org/x/time v0.3.0 // indirect @@ -337,6 +336,7 @@ require ( github.com/filecoin-project/go-fil-markets v1.28.3 github.com/filecoin-project/lotus v1.23.4-rc1 github.com/ipfs/boxo v0.12.0 + github.com/ipfs/go-ipfs-blockstore v1.3.0 github.com/ipfs/kubo v0.22.0 github.com/ipni/go-libipni v0.5.1 github.com/ipni/ipni-cli v0.1.1 @@ -360,7 +360,6 @@ require ( github.com/hashicorp/go-uuid v1.0.2 // indirect github.com/hashicorp/golang-lru/arc/v2 v2.0.5 // indirect github.com/hashicorp/golang-lru/v2 v2.0.5 // indirect - github.com/ipfs/go-ipfs-blockstore v1.3.0 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect github.com/jackc/pgconn v1.11.0 // indirect github.com/jackc/pgio v1.0.0 // indirect diff --git a/go.sum b/go.sum index c5d7b75b0..7243491dd 100644 --- a/go.sum +++ b/go.sum @@ -687,8 +687,6 @@ github.com/hashicorp/raft v1.3.10/go.mod h1:J8naEwc6XaaCfts7+28whSeRvCqTd6e20BlC github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea h1:xykPFhrBAS2J0VBzVa5e80b5ZtYuNQtgXjN40qBZlD4= github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hnlq715/golang-lru v0.4.0 h1:gyo/wIvLE6Upf1wucAfwTjpR+BQ5Lli2766H2MnNPv0= -github.com/hnlq715/golang-lru v0.4.0/go.mod h1:RBkgDAtlu0SgTPvpb4VW2/RQnkCBMRD3Lr6B9RhsAS8= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= diff --git a/gql/resolver.go b/gql/resolver.go index 4791cb9b4..b22c140e9 100644 --- a/gql/resolver.go +++ b/gql/resolver.go @@ -9,8 +9,6 @@ import ( "time" "github.com/dustin/go-humanize" - "github.com/filecoin-project/boost-gfm/piecestore" - gfm_storagemarket "github.com/filecoin-project/boost-gfm/storagemarket" "github.com/filecoin-project/boost/cmd/lib" "github.com/filecoin-project/boost/db" "github.com/filecoin-project/boost/fundmanager" @@ -20,13 +18,13 @@ import ( "github.com/filecoin-project/boost/lib/mpoolmonitor" "github.com/filecoin-project/boost/markets/storageadapter" "github.com/filecoin-project/boost/node/config" - "github.com/filecoin-project/boost/node/modules/dtypes" "github.com/filecoin-project/boost/piecedirectory" "github.com/filecoin-project/boost/retrievalmarket/rtvllog" "github.com/filecoin-project/boost/sectorstatemgr" "github.com/filecoin-project/boost/storagemanager" "github.com/filecoin-project/boost/storagemarket" "github.com/filecoin-project/boost/storagemarket/sealingpipeline" + "github.com/filecoin-project/boost/storagemarket/storedask" "github.com/filecoin-project/boost/storagemarket/types" "github.com/filecoin-project/boost/storagemarket/types/dealcheckpoints" "github.com/filecoin-project/boost/transport" @@ -64,10 +62,7 @@ type resolver struct { fundMgr 
*fundmanager.FundManager storageMgr *storagemanager.StorageManager provider *storagemarket.Provider - legacyDeals *legacy.LegacyDealsManager - legacyProv gfm_storagemarket.StorageProvider - legacyDT dtypes.ProviderDataTransfer - ps piecestore.PieceStore + legacyDeals legacy.LegacyDealManager ssm *sectorstatemgr.SectorStateMgr piecedirectory *piecedirectory.PieceDirectory publisher *storageadapter.DealPublisher @@ -77,9 +72,10 @@ type resolver struct { fullNode v1api.FullNode mpool *mpoolmonitor.MpoolMonitor mma *lib.MultiMinerAccessor + askProv *storedask.StoredAsk } -func NewResolver(ctx context.Context, cfg *config.Boost, r lotus_repo.LockedRepo, h host.Host, dealsDB *db.DealsDB, logsDB *db.LogsDB, retDB *rtvllog.RetrievalLogDB, plDB *db.ProposalLogsDB, fundsDB *db.FundsDB, fundMgr *fundmanager.FundManager, storageMgr *storagemanager.StorageManager, spApi sealingpipeline.API, provider *storagemarket.Provider, legacyDeals *legacy.LegacyDealsManager, legacyProv gfm_storagemarket.StorageProvider, legacyDT dtypes.ProviderDataTransfer, ps piecestore.PieceStore, piecedirectory *piecedirectory.PieceDirectory, publisher *storageadapter.DealPublisher, indexProv provider.Interface, idxProvWrapper *indexprovider.Wrapper, fullNode v1api.FullNode, ssm *sectorstatemgr.SectorStateMgr, mpool *mpoolmonitor.MpoolMonitor, mma *lib.MultiMinerAccessor) *resolver { +func NewResolver(ctx context.Context, cfg *config.Boost, r lotus_repo.LockedRepo, h host.Host, dealsDB *db.DealsDB, logsDB *db.LogsDB, retDB *rtvllog.RetrievalLogDB, plDB *db.ProposalLogsDB, fundsDB *db.FundsDB, fundMgr *fundmanager.FundManager, storageMgr *storagemanager.StorageManager, spApi sealingpipeline.API, provider *storagemarket.Provider, legacyDeals legacy.LegacyDealManager, piecedirectory *piecedirectory.PieceDirectory, publisher *storageadapter.DealPublisher, indexProv provider.Interface, idxProvWrapper *indexprovider.Wrapper, fullNode v1api.FullNode, ssm *sectorstatemgr.SectorStateMgr, mpool *mpoolmonitor.MpoolMonitor, mma *lib.MultiMinerAccessor, assk *storedask.StoredAsk) *resolver { return &resolver{ ctx: ctx, cfg: cfg, @@ -94,9 +90,6 @@ func NewResolver(ctx context.Context, cfg *config.Boost, r lotus_repo.LockedRepo storageMgr: storageMgr, provider: provider, legacyDeals: legacyDeals, - legacyProv: legacyProv, - legacyDT: legacyDT, - ps: ps, piecedirectory: piecedirectory, publisher: publisher, spApi: spApi, @@ -106,6 +99,7 @@ func NewResolver(ctx context.Context, cfg *config.Boost, r lotus_repo.LockedRepo ssm: ssm, mpool: mpool, mma: mma, + askProv: assk, } } diff --git a/gql/resolver_ask.go b/gql/resolver_ask.go index 696929fa3..b68849391 100644 --- a/gql/resolver_ask.go +++ b/gql/resolver_ask.go @@ -5,8 +5,8 @@ import ( "fmt" "time" - "github.com/filecoin-project/boost-gfm/storagemarket" "github.com/filecoin-project/boost/gql/types" + "github.com/filecoin-project/boost/storagemarket/types/legacytypes" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/build" "github.com/graph-gophers/graphql-go" @@ -27,7 +27,7 @@ func (r *resolver) StorageAsk(ctx context.Context) (*storageAskResolver, error) return nil, fmt.Errorf("getting chain head: %w", err) } - signedAsk := r.legacyProv.GetAsk() + signedAsk := r.askProv.GetAsk(r.provider.Address) ask := signedAsk.Ask expTimeEpochs := ask.Expiry - head.Height() expTime := time.Now().Add(time.Duration(expTimeEpochs) * time.Duration(build.BlockDelaySecs) * time.Second) @@ -49,8 +49,8 @@ type storageAskUpdate struct { MaxPieceSize *types.Uint64 } -func (r 
*resolver) StorageAskUpdate(args struct{ Update storageAskUpdate }) (bool, error) { - signedAsk := r.legacyProv.GetAsk() +func (r *resolver) StorageAskUpdate(ctx context.Context, args struct{ Update storageAskUpdate }) (bool, error) { + signedAsk := r.askProv.GetAsk(r.provider.Address) ask := signedAsk.Ask dur := 87660 * time.Hour // 10 years @@ -58,7 +58,7 @@ func (r *resolver) StorageAskUpdate(args struct{ Update storageAskUpdate }) (boo price := ask.Price verifiedPrice := ask.VerifiedPrice - var opts []storagemarket.StorageAskOption + var opts []legacytypes.StorageAskOption update := args.Update if update.Price != nil { @@ -68,13 +68,13 @@ func (r *resolver) StorageAskUpdate(args struct{ Update storageAskUpdate }) (boo verifiedPrice = (*update.VerifiedPrice).Int } if update.MinPieceSize != nil { - opts = append(opts, storagemarket.MinPieceSize(abi.PaddedPieceSize(*update.MinPieceSize))) + opts = append(opts, legacytypes.MinPieceSize(abi.PaddedPieceSize(*update.MinPieceSize))) } if update.MaxPieceSize != nil { - opts = append(opts, storagemarket.MaxPieceSize(abi.PaddedPieceSize(*update.MaxPieceSize))) + opts = append(opts, legacytypes.MaxPieceSize(abi.PaddedPieceSize(*update.MaxPieceSize))) } - err := r.legacyProv.SetAsk(price, verifiedPrice, duration, opts...) + err := r.askProv.SetAsk(ctx, price, verifiedPrice, duration, r.provider.Address, opts...) if err != nil { return false, fmt.Errorf("setting ask: %w", err) } diff --git a/gql/resolver_legacy.go b/gql/resolver_legacy.go index b47001902..30186f8bd 100644 --- a/gql/resolver_legacy.go +++ b/gql/resolver_legacy.go @@ -5,14 +5,14 @@ import ( "fmt" "github.com/dustin/go-humanize" - "github.com/filecoin-project/boost-gfm/storagemarket" gqltypes "github.com/filecoin-project/boost/gql/types" + "github.com/filecoin-project/boost/storagemarket/types/legacytypes" "github.com/graph-gophers/graphql-go" "github.com/ipfs/go-cid" ) type legacyDealResolver struct { - storagemarket.MinerDeal + legacytypes.MinerDeal transferred uint64 } @@ -28,7 +28,7 @@ func (r *resolver) LegacyDeal(ctx context.Context, args struct{ ID graphql.ID }) return nil, fmt.Errorf("parsing deal signed proposal cid %s: %w", args.ID, err) } - dl, err := r.legacyProv.GetLocalDeal(signedPropCid) + dl, err := r.legacyDeals.ByPropCid(signedPropCid) if err != nil { return nil, fmt.Errorf("getting deal with signed proposal cid %s: %w", args.ID, err) } @@ -36,16 +36,8 @@ func (r *resolver) LegacyDeal(ctx context.Context, args struct{ ID graphql.ID }) return r.withTransferState(ctx, dl), nil } -func (r *resolver) withTransferState(ctx context.Context, dl storagemarket.MinerDeal) *legacyDealResolver { +func (r *resolver) withTransferState(ctx context.Context, dl legacytypes.MinerDeal) *legacyDealResolver { dr := &legacyDealResolver{MinerDeal: dl} - if dl.TransferChannelId != nil { - st, err := r.legacyDT.ChannelState(ctx, *dl.TransferChannelId) - if err != nil { - log.Warnw("getting transfer channel id %s: %s", *dl.TransferChannelId, err) - } else { - dr.transferred = st.Received() - } - } return dr } @@ -71,26 +63,26 @@ func (r *resolver) LegacyDeals(ctx context.Context, args dealsArgs) (*legacyDeal } // Get the total number of deals - dealCount, err := r.legacyProv.LocalDealCount() + dealCount, err := r.legacyDeals.DealCount(ctx) if err != nil { return nil, fmt.Errorf("getting deal count: %w", err) } var more bool - var pageDeals []storagemarket.MinerDeal + var pageDeals []legacytypes.MinerDeal if args.Query.Value != nil && *args.Query.Value != "" { // If there is a search query, 
assume the query is the deal // proposal cid and try to fetch the corresponding deal propCidQuery, err := cid.Parse(*args.Query.Value) if err == nil { - dl, err := r.legacyProv.GetLocalDeal(propCidQuery) + dl, err := r.legacyDeals.ByPropCid(propCidQuery) if err == nil { - pageDeals = []storagemarket.MinerDeal{dl} + pageDeals = []legacytypes.MinerDeal{dl} } } } else { // Get a page worth of deals, plus one extra so we can see if there are more deals - pageDeals, err = r.legacyProv.ListLocalDealsPage(startPropCid, offset, limit+1) + pageDeals, err = r.legacyDeals.ListLocalDealsPage(startPropCid, offset, limit+1) if err != nil { return nil, fmt.Errorf("getting page of deals: %w", err) } @@ -215,11 +207,11 @@ func (r *legacyDealResolver) InboundCARPath() string { } func (r *legacyDealResolver) Status() string { - return storagemarket.DealStates[r.State] + return legacytypes.DealStates[r.State] } func (r *legacyDealResolver) Message() string { - if r.MinerDeal.Message == "" && r.State == storagemarket.StorageDealTransferring { + if r.MinerDeal.Message == "" && r.State == legacytypes.StorageDealTransferring { switch r.transferred { case 0: return "Transferring" diff --git a/gql/resolver_rtvllog.go b/gql/resolver_rtvllog.go index 02eaa1a48..2f38d7a38 100644 --- a/gql/resolver_rtvllog.go +++ b/gql/resolver_rtvllog.go @@ -2,6 +2,7 @@ package gql import ( "context" + gqltypes "github.com/filecoin-project/boost/gql/types" "github.com/filecoin-project/boost/retrievalmarket/rtvllog" "github.com/graph-gophers/graphql-go" diff --git a/indexprovider/wrapper.go b/indexprovider/wrapper.go index 7207e5d8e..bd0f80d5e 100644 --- a/indexprovider/wrapper.go +++ b/indexprovider/wrapper.go @@ -9,12 +9,13 @@ import ( "os" "path/filepath" - "go.uber.org/fx" - + "github.com/filecoin-project/boost/lib/legacy" + "github.com/filecoin-project/boost/storagemarket/types/legacytypes" + "github.com/filecoin-project/go-statemachine/fsm" "github.com/ipfs/go-datastore" "github.com/ipld/go-ipld-prime" + "go.uber.org/fx" - "github.com/filecoin-project/boost-gfm/storagemarket" "github.com/filecoin-project/boost/db" "github.com/filecoin-project/boost/markets/idxprov" "github.com/filecoin-project/boost/node/config" @@ -49,7 +50,7 @@ type Wrapper struct { cfg *config.Boost dealsDB *db.DealsDB - legacyProv storagemarket.StorageProvider + legacyProv legacy.LegacyDealManager prov provider.Interface piecedirectory *piecedirectory.PieceDirectory ssm *sectorstatemgr.SectorStateMgr @@ -63,11 +64,11 @@ type Wrapper struct { } func NewWrapper(cfg *config.Boost) func(lc fx.Lifecycle, h host.Host, r repo.LockedRepo, dealsDB *db.DealsDB, - ssDB *db.SectorStateDB, legacyProv storagemarket.StorageProvider, prov provider.Interface, + ssDB *db.SectorStateDB, legacyProv legacy.LegacyDealManager, prov provider.Interface, piecedirectory *piecedirectory.PieceDirectory, ssm *sectorstatemgr.SectorStateMgr, meshCreator idxprov.MeshCreator, storageService lotus_modules.MinerStorageService) (*Wrapper, error) { return func(lc fx.Lifecycle, h host.Host, r repo.LockedRepo, dealsDB *db.DealsDB, - ssDB *db.SectorStateDB, legacyProv storagemarket.StorageProvider, prov provider.Interface, + ssDB *db.SectorStateDB, legacyProv legacy.LegacyDealManager, prov provider.Interface, piecedirectory *piecedirectory.PieceDirectory, ssm *sectorstatemgr.SectorStateMgr, meshCreator idxprov.MeshCreator, storageService lotus_modules.MinerStorageService) (*Wrapper, error) { @@ -206,7 +207,7 @@ func (w *Wrapper) handleUpdates(ctx context.Context, sectorUpdates map[abi.Secto } // Get 
deals by sector ID, whether they're legacy or boost deals -func (w *Wrapper) dealsBySectorID(ctx context.Context, legacyDeals map[abi.SectorID][]storagemarket.MinerDeal, sectorID abi.SectorID) ([]basicDealInfo, error) { +func (w *Wrapper) dealsBySectorID(ctx context.Context, legacyDeals map[abi.SectorID][]legacytypes.MinerDeal, sectorID abi.SectorID) ([]basicDealInfo, error) { // First query the boost database deals, err := w.dealsDB.BySectorID(ctx, sectorID) if err != nil { @@ -242,13 +243,13 @@ func (w *Wrapper) dealsBySectorID(ctx context.Context, legacyDeals map[abi.Secto // Iterate over all legacy deals and make a map of sector ID -> legacy deal. // To save memory, only include legacy deals with a sector ID that we know // we're going to query, ie the set of sector IDs in the stateUpdates map. -func (w *Wrapper) legacyDealsBySectorID(stateUpdates map[abi.SectorID]db.SealState) (map[abi.SectorID][]storagemarket.MinerDeal, error) { - legacyDeals, err := w.legacyProv.ListLocalDeals() +func (w *Wrapper) legacyDealsBySectorID(stateUpdates map[abi.SectorID]db.SealState) (map[abi.SectorID][]legacytypes.MinerDeal, error) { + legacyDeals, err := w.legacyProv.ListDeals() if err != nil { return nil, err } - bySectorID := make(map[abi.SectorID][]storagemarket.MinerDeal, len(legacyDeals)) + bySectorID := make(map[abi.SectorID][]legacytypes.MinerDeal, len(legacyDeals)) for _, deal := range legacyDeals { minerID, err := address.IDFromAddress(deal.Proposal.Provider) if err != nil { @@ -282,25 +283,27 @@ func (w *Wrapper) Enabled() bool { // The advertisement published by this function covers 2 protocols: // // Bitswap: -// 1. bitswap is completely disabled: in which case an advertisement is +// +// 1. bitswap is completely disabled: in which case an advertisement is // published with http(or empty if http is disabled) extended providers // that should wipe previous support on indexer side. // -// 2. bitswap is enabled with public addresses: in which case publish an +// 2. bitswap is enabled with public addresses: in which case publish an // advertisement with extended providers records corresponding to the // public addresses. Note, according the IPNI spec, the host ID will // also be added to the extended providers for signing reasons with empty // metadata making a total of 2 extended provider records. // -// 3. bitswap with boostd address: in which case public an advertisement +// 3. bitswap with boostd address: in which case public an advertisement // with one extended provider record that just adds bitswap metadata. // // HTTP: -// 1. http is completely disabled: in which case an advertisement is +// +// 1. http is completely disabled: in which case an advertisement is // published with bitswap(or empty if bitswap is disabled) extended providers // that should wipe previous support on indexer side // -// 2. http is enabled: in which case an advertisement is published with +// 2. 
http is enabled: in which case an advertisement is published with // bitswap and http(or only http if bitswap is disabled) extended providers // that should wipe previous support on indexer side // @@ -445,22 +448,57 @@ func (w *Wrapper) IndexerAnnounceAllDeals(ctx context.Context) error { } log.Info("announcing all legacy deals to Indexer") - err := w.legacyProv.AnnounceAllDealsToIndexer(ctx) - if err == nil { - log.Infof("finished announcing all legacy deals to Indexer") - } else { - log.Warnw("failed to announce legacy deals to Indexer", "err", err) + + legacyDeals, err := w.legacyProv.ListDeals() + if err != nil { + return fmt.Errorf("failed to get the list of legacy deals: %w", err) + } + + inSealingSubsystem := make(map[fsm.StateKey]struct{}, len(legacytypes.StatesKnownBySealingSubsystem)) + for _, s := range legacytypes.StatesKnownBySealingSubsystem { + inSealingSubsystem[s] = struct{}{} } + expiredStates := make(map[fsm.StateKey]struct{}, len(legacytypes.ProviderFinalityStates)) + for _, s := range legacytypes.ProviderFinalityStates { + expiredStates[s] = struct{}{} + } + + shards := make(map[string]struct{}) + var nSuccess int + var merr error + + for _, d := range legacyDeals { + // only announce deals that have been handed off to the sealing subsystem as the rest will get announced anyways + if _, ok := inSealingSubsystem[d.State]; !ok { + continue + } + // only announce deals that have not expired + if _, ok := expiredStates[d.State]; ok { + continue + } + + adCid, lerr := w.AnnounceLegcayDealToIndexer(ctx, d.ProposalCid) + if lerr != nil { + merr = multierror.Append(merr, lerr) + log.Errorw("failed to announce deal to Index provider", "proposalCid", d.ProposalCid, "err", lerr) + continue + } + log.Infof("announce legacy deal with proposal CID %s to the indexer with announcement-cid: %s", d.ProposalCid.String(), adCid.String()) + shards[d.Proposal.PieceCID.String()] = struct{}{} + nSuccess++ + } + + log.Infow("finished announcing active deals to index provider", "number of deals", nSuccess, "number of shards", shards) + log.Info("announcing all Boost deals to Indexer") deals, err := w.dealsDB.ListActive(ctx) if err != nil { return fmt.Errorf("failed to list deals: %w", err) } - shards := make(map[string]struct{}) - var nSuccess int - var merr error + bshards := make(map[string]struct{}) + var bnSuccess int for _, d := range deals { // filter out deals that will announce automatically at a later @@ -480,11 +518,11 @@ func (w *Wrapper) IndexerAnnounceAllDeals(ctx context.Context) error { } continue } - shards[d.ClientDealProposal.Proposal.PieceCID.String()] = struct{}{} - nSuccess++ + bshards[d.ClientDealProposal.Proposal.PieceCID.String()] = struct{}{} + bnSuccess++ } - log.Infow("finished announcing all boost deals to Indexer", "number of deals", nSuccess, "number of shards", len(shards)) + log.Infow("finished announcing all boost deals to Indexer", "number of deals", bnSuccess, "number of shards", len(bshards)) return merr } @@ -595,7 +633,7 @@ func (w *Wrapper) MultihashLister(ctx context.Context, prov peer.ID, contextID [ } // Deal was not found in boost DB - check in legacy markets - md, legacyErr := w.legacyProv.GetLocalDeal(proposalCid) + md, legacyErr := w.legacyProv.ByPropCid(proposalCid) if legacyErr == nil { // Found the deal, get an interator over the piece return provideF(proposalCid, md.Proposal.PieceCID) @@ -682,5 +720,21 @@ type basicDealInfo struct { AnnounceToIPNI bool DealID string SectorID abi.SectorID - DealProposal storagemarket.ClientDealProposal + 
DealProposal legacytypes.ClientDealProposal +} + +func (w *Wrapper) AnnounceLegcayDealToIndexer(ctx context.Context, proposalCid cid.Cid) (cid.Cid, error) { + var deal legacytypes.MinerDeal + deal, err := w.legacyProv.ByPropCid(proposalCid) + if err != nil { + return cid.Undef, fmt.Errorf("failed getting deal %s: %w", proposalCid, err) + } + + mt := metadata.GraphsyncFilecoinV1{ + PieceCID: deal.Proposal.PieceCID, + FastRetrieval: deal.FastRetrieval, + VerifiedDeal: deal.Proposal.VerifiedDeal, + } + + return w.AnnounceBoostDealMetadata(ctx, mt, proposalCid) } diff --git a/indexprovider/wrapper_test.go b/indexprovider/wrapper_test.go index 713dd1fc6..8ab82a2e3 100644 --- a/indexprovider/wrapper_test.go +++ b/indexprovider/wrapper_test.go @@ -4,10 +4,11 @@ import ( "context" "testing" - "github.com/filecoin-project/boost-gfm/storagemarket" "github.com/filecoin-project/boost/db" "github.com/filecoin-project/boost/db/migrations" - "github.com/filecoin-project/boost/indexprovider/mock" + _ "github.com/filecoin-project/boost/lib/legacy/mocks" + mocks_legacy "github.com/filecoin-project/boost/lib/legacy/mocks" + "github.com/filecoin-project/boost/storagemarket/types/legacytypes" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/builtin/v9/market" @@ -21,7 +22,7 @@ import ( func TestWrapperEmptyStorageListAndNoUpdates(t *testing.T) { wrapper, legacyStorageProvider, _, _ := setup(t) - legacyStorageProvider.EXPECT().ListLocalDeals().AnyTimes().Return(nil, nil) + legacyStorageProvider.EXPECT().ListDeals().AnyTimes().Return(nil, nil) // handleUpdates with an empty response from MinerAPI.StorageList() and no updates err := wrapper.handleUpdates(context.Background(), nil) @@ -52,7 +53,7 @@ func TestSectorStateManagerMatchingDealOnly(t *testing.T) { t.Run("deal in boost db", func(t *testing.T) { wrapper, legacyStorageProvider, storageMiner, prov := setup(t) - legacyStorageProvider.EXPECT().ListLocalDeals().Return(nil, nil) + legacyStorageProvider.EXPECT().ListDeals().Return(nil, nil) // Add a deal to the database deals, err := db.GenerateNDeals(1) @@ -73,11 +74,11 @@ func TestSectorStateManagerMatchingDealOnly(t *testing.T) { require.NoError(t, err) sectorNum := abi.SectorNumber(10) - deals := []storagemarket.MinerDeal{{ + deals := []legacytypes.MinerDeal{{ ClientDealProposal: boostDeals[0].ClientDealProposal, SectorNumber: sectorNum, }} - legacyStorageProvider.EXPECT().ListLocalDeals().Return(deals, nil) + legacyStorageProvider.EXPECT().ListDeals().Return(deals, nil) provAddr := deals[0].ClientDealProposal.Proposal.Provider runTest(t, wrapper, storageMiner, prov, provAddr, sectorNum) @@ -266,7 +267,7 @@ func TestSectorStateManagerStateChangeToIndexer(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { wrapper, legacyStorageProvider, storageMiner, prov := setup(t) - legacyStorageProvider.EXPECT().ListLocalDeals().AnyTimes().Return(nil, nil) + legacyStorageProvider.EXPECT().ListDeals().AnyTimes().Return(nil, nil) // Add a deal to the database deals, err := db.GenerateNDeals(1) @@ -295,7 +296,7 @@ func TestSectorStateManagerStateChangeToIndexer(t *testing.T) { } } -func setup(t *testing.T) (*Wrapper, *mock.MockStorageProvider, *mockApiStorageMiner, *mock_provider.MockInterface) { +func setup(t *testing.T) (*Wrapper, *mocks_legacy.MockLegacyDealManager, *mockApiStorageMiner, *mock_provider.MockInterface) { ctx := context.Background() ctrl := gomock.NewController(t) prov := 
mock_provider.NewMockInterface(ctrl) @@ -304,19 +305,20 @@ func setup(t *testing.T) (*Wrapper, *mock.MockStorageProvider, *mockApiStorageMi require.NoError(t, db.CreateAllBoostTables(ctx, sqldb, sqldb)) require.NoError(t, migrations.Migrate(sqldb)) + legacyProv := mocks_legacy.NewMockLegacyDealManager(ctrl) + dealsDB := db.NewDealsDB(sqldb) storageMiner := &mockApiStorageMiner{} - storageProvider := mock.NewMockStorageProvider(ctrl) wrapper := &Wrapper{ enabled: true, dealsDB: dealsDB, prov: prov, - legacyProv: storageProvider, + legacyProv: legacyProv, meshCreator: &meshCreatorStub{}, } - return wrapper, storageProvider, storageMiner, prov + return wrapper, legacyProv, storageMiner, prov } type mockApiStorageMiner struct { diff --git a/itests/framework/framework.go b/itests/framework/framework.go index f8dcd7dc8..e58788c25 100644 --- a/itests/framework/framework.go +++ b/itests/framework/framework.go @@ -12,7 +12,6 @@ import ( "testing" "time" - gfm_storagemarket "github.com/filecoin-project/boost-gfm/storagemarket" "github.com/filecoin-project/boost/api" boostclient "github.com/filecoin-project/boost/client" "github.com/filecoin-project/boost/node" @@ -26,6 +25,7 @@ import ( "github.com/filecoin-project/go-address" cborutil "github.com/filecoin-project/go-cbor-util" lotus_gfm_retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket" + gfm_storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket" lotus_gfm_storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" diff --git a/itests/markets_v1_deal_test.go b/itests/markets_v1_deal_test.go deleted file mode 100644 index 9be1e988d..000000000 --- a/itests/markets_v1_deal_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package itests - -import ( - "context" - "testing" - - "github.com/filecoin-project/boost/itests/framework" - "github.com/filecoin-project/boost/testutil" - lapi "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/itests/kit" - "github.com/stretchr/testify/require" -) - -func TestMarketsV1Deal(t *testing.T) { - ctx := context.Background() - log := framework.Log - - kit.QuietMiningLogs() - framework.SetLogLevel() - var opts []framework.FrameworkOpts - opts = append(opts, framework.EnableLegacyDeals(true)) - f := framework.NewTestFramework(ctx, t, opts...) 
- err := f.Start() - require.NoError(t, err) - defer f.Stop() - - // Create a CAR file - log.Debugw("using tempdir", "dir", f.HomeDir) - rseed := 0 - size := 7 << 20 // 7MiB file - - inPath, err := testutil.CreateRandomFile(f.HomeDir, rseed, size) - require.NoError(t, err) - res, err := f.FullNode.ClientImport(ctx, lapi.FileRef{Path: inPath}) - require.NoError(t, err) - - // Create a new markets v1 deal - dp := f.DefaultMarketsV1DealParams() - dp.Data.Root = res.Root - - log.Debugw("starting deal", "root", res.Root) - dealProposalCid, err := f.FullNode.ClientStartDeal(ctx, &dp) - require.NoError(t, err) - - log.Debugw("got deal proposal cid", "cid", dealProposalCid) - - err = f.WaitDealSealed(ctx, dealProposalCid) - require.NoError(t, err) - - log.Debugw("deal is sealed, starting retrieval", "cid", dealProposalCid, "root", res.Root) - outPath := f.Retrieve(ctx, t, dealProposalCid, res.Root, true, nil) - - log.Debugw("retrieval is done, compare in- and out- files", "in", inPath, "out", outPath) - kit.AssertFilesEqual(t, inPath, outPath) -} diff --git a/itests/markets_v1_offline_deal_test.go b/itests/markets_v1_offline_deal_test.go deleted file mode 100644 index 45c21cbf2..000000000 --- a/itests/markets_v1_offline_deal_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package itests - -import ( - "context" - "fmt" - "path/filepath" - "testing" - "time" - - "github.com/filecoin-project/boost-gfm/storagemarket" - "github.com/filecoin-project/boost/itests/framework" - "github.com/filecoin-project/boost/testutil" - lapi "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/itests/kit" - "github.com/stretchr/testify/require" -) - -func TestMarketsV1OfflineDeal(t *testing.T) { - ctx := context.Background() - log := framework.Log - - kit.QuietMiningLogs() - framework.SetLogLevel() - var opts []framework.FrameworkOpts - opts = append(opts, framework.EnableLegacyDeals(true)) - f := framework.NewTestFramework(ctx, t, opts...) 
- err := f.Start() - require.NoError(t, err) - defer f.Stop() - - // Create a CAR file - log.Debugw("using tempdir", "dir", f.HomeDir) - - rseed := 1 - size := 7 << 20 // 7MiB file - inPath, err := testutil.CreateRandomFile(f.HomeDir, rseed, size) - require.NoError(t, err) - res, err := f.FullNode.ClientImport(ctx, lapi.FileRef{Path: inPath}) - require.NoError(t, err) - - // Get the piece size and commP - rootCid := res.Root - pieceInfo, err := f.FullNode.ClientDealPieceCID(ctx, rootCid) - require.NoError(t, err) - - // Create a new markets v1 deal - dp := f.DefaultMarketsV1DealParams() - dp.Data.Root = res.Root - // Replace with params for manual storage deal (offline deal) - dp.Data.TransferType = storagemarket.TTManual - dp.Data.PieceCid = &pieceInfo.PieceCID - dp.Data.PieceSize = pieceInfo.PieceSize.Unpadded() - - log.Debugw("starting offline deal", "root", res.Root) - dealProposalCid, err := f.FullNode.ClientStartDeal(ctx, &dp) - require.NoError(t, err) - log.Debugw("got deal proposal cid", "cid", dealProposalCid) - - // Wait for the deal to reach StorageDealCheckForAcceptance on the client - cd, err := f.FullNode.ClientGetDealInfo(ctx, *dealProposalCid) - require.NoError(t, err) - require.Eventually(t, func() bool { - cd, _ := f.FullNode.ClientGetDealInfo(ctx, *dealProposalCid) - fmt.Println(storagemarket.DealStates[cd.State]) - return cd.State == storagemarket.StorageDealCheckForAcceptance - }, 60*time.Second, 500*time.Millisecond, "actual deal status is %s", storagemarket.DealStates[cd.State]) - - // Create a CAR file from the raw file - log.Debugw("generate out.car for miner") - carFilePath := filepath.Join(f.HomeDir, "out.car") - err = f.FullNode.ClientGenCar(ctx, lapi.FileRef{Path: inPath}, carFilePath) - require.NoError(t, err) - - // Import the CAR file on the miner - this is the equivalent to - // transferring the file across the wire in a normal (non-offline) deal - log.Debugw("import out.car in boost") - err = f.Boost.MarketImportDealData(ctx, *dealProposalCid, carFilePath) - require.NoError(t, err) - - log.Debugw("wait until offline deal is sealed") - err = f.WaitDealSealed(ctx, dealProposalCid) - require.NoError(t, err) - - log.Debugw("offline deal is sealed, starting retrieval", "cid", dealProposalCid, "root", res.Root) - outPath := f.Retrieve(ctx, t, dealProposalCid, res.Root, true, nil) - - log.Debugw("retrieval of offline deal is done, compare in- and out- files", "in", inPath, "out", outPath) - kit.AssertFilesEqual(t, inPath, outPath) -} diff --git a/lib/legacy/dealmanager.go b/lib/legacy/dealmanager.go index 9a4cd19f9..ecdbda048 100644 --- a/lib/legacy/dealmanager.go +++ b/lib/legacy/dealmanager.go @@ -3,18 +3,34 @@ package legacy import ( "context" "errors" - gfm_storagemarket "github.com/filecoin-project/boost-gfm/storagemarket" + "sort" + "sync" + "time" + + "github.com/filecoin-project/boost/storagemarket/types/legacytypes" + "github.com/filecoin-project/go-statemachine/fsm" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" logging "github.com/ipfs/go-log/v2" - "sync" - "time" ) +//go:generate go run github.com/golang/mock/mockgen -destination=mocks/legacy_manager_mock.go . 
LegacyDealManager + +type LegacyDealManager interface { + Run(ctx context.Context) + DealCount(ctx context.Context) (int, error) + ByPieceCid(ctx context.Context, pieceCid cid.Cid) ([]legacytypes.MinerDeal, error) + ByPayloadCid(ctx context.Context, payloadCid cid.Cid) ([]legacytypes.MinerDeal, error) + ByPublishCid(ctx context.Context, publishCid cid.Cid) ([]legacytypes.MinerDeal, error) + ListDeals() ([]legacytypes.MinerDeal, error) + ByPropCid(propCid cid.Cid) (legacytypes.MinerDeal, error) + ListLocalDealsPage(startPropCid *cid.Cid, offset int, limit int) ([]legacytypes.MinerDeal, error) +} + var log = logging.Logger("legacydeals") -type LegacyDealsManager struct { - legacyProv gfm_storagemarket.StorageProvider +type legacyDealsManager struct { + legacyFSM fsm.Group startedOnce sync.Once started chan struct{} @@ -26,9 +42,9 @@ type LegacyDealsManager struct { publishCidIdx map[cid.Cid][]cid.Cid } -func NewLegacyDealsManager(legacyProv gfm_storagemarket.StorageProvider) *LegacyDealsManager { - return &LegacyDealsManager{ - legacyProv: legacyProv, +func NewLegacyDealsManager(legacyFSM fsm.Group) *legacyDealsManager { + return &legacyDealsManager{ + legacyFSM: legacyFSM, started: make(chan struct{}), pieceCidIdx: make(map[cid.Cid][]cid.Cid), payloadCidIdx: make(map[cid.Cid][]cid.Cid), @@ -36,7 +52,7 @@ func NewLegacyDealsManager(legacyProv gfm_storagemarket.StorageProvider) *Legacy } } -func (m *LegacyDealsManager) Run(ctx context.Context) { +func (m *legacyDealsManager) Run(ctx context.Context) { refresh := func() { err := m.refresh() if err != nil { @@ -60,10 +76,10 @@ func (m *LegacyDealsManager) Run(ctx context.Context) { } } -func (m *LegacyDealsManager) refresh() error { +func (m *legacyDealsManager) refresh() error { start := time.Now() log.Infow("refreshing legacy deals list") - dls, err := m.legacyProv.ListLocalDeals() + dls, err := m.ListDeals() if err != nil { return err } @@ -101,7 +117,7 @@ func (m *LegacyDealsManager) refresh() error { return nil } -func (m *LegacyDealsManager) waitStarted(ctx context.Context) error { +func (m *legacyDealsManager) waitStarted(ctx context.Context) error { select { case <-ctx.Done(): return ctx.Err() @@ -110,7 +126,7 @@ func (m *LegacyDealsManager) waitStarted(ctx context.Context) error { } } -func (m *LegacyDealsManager) DealCount(ctx context.Context) (int, error) { +func (m *legacyDealsManager) DealCount(ctx context.Context) (int, error) { if err := m.waitStarted(ctx); err != nil { return 0, err } @@ -121,7 +137,7 @@ func (m *LegacyDealsManager) DealCount(ctx context.Context) (int, error) { return m.dealCount, nil } -func (m *LegacyDealsManager) ByPieceCid(ctx context.Context, pieceCid cid.Cid) ([]gfm_storagemarket.MinerDeal, error) { +func (m *legacyDealsManager) ByPieceCid(ctx context.Context, pieceCid cid.Cid) ([]legacytypes.MinerDeal, error) { if err := m.waitStarted(ctx); err != nil { return nil, err } @@ -137,7 +153,7 @@ func (m *LegacyDealsManager) ByPieceCid(ctx context.Context, pieceCid cid.Cid) ( return m.byPropCids(propCids) } -func (m *LegacyDealsManager) ByPayloadCid(ctx context.Context, payloadCid cid.Cid) ([]gfm_storagemarket.MinerDeal, error) { +func (m *legacyDealsManager) ByPayloadCid(ctx context.Context, payloadCid cid.Cid) ([]legacytypes.MinerDeal, error) { if err := m.waitStarted(ctx); err != nil { return nil, err } @@ -153,7 +169,7 @@ func (m *LegacyDealsManager) ByPayloadCid(ctx context.Context, payloadCid cid.Ci return m.byPropCids(propCids) } -func (m *LegacyDealsManager) ByPublishCid(ctx context.Context, publishCid 
cid.Cid) ([]gfm_storagemarket.MinerDeal, error) { +func (m *legacyDealsManager) ByPublishCid(ctx context.Context, publishCid cid.Cid) ([]legacytypes.MinerDeal, error) { if err := m.waitStarted(ctx); err != nil { return nil, err } @@ -170,12 +186,13 @@ func (m *LegacyDealsManager) ByPublishCid(ctx context.Context, publishCid cid.Ci } // Get deals by deal signed proposal cid -func (m *LegacyDealsManager) byPropCids(propCids []cid.Cid) ([]gfm_storagemarket.MinerDeal, error) { - dls := make([]gfm_storagemarket.MinerDeal, 0, len(propCids)) +func (m *legacyDealsManager) byPropCids(propCids []cid.Cid) ([]legacytypes.MinerDeal, error) { + dls := make([]legacytypes.MinerDeal, 0, len(propCids)) for _, propCid := range propCids { - dl, err := m.legacyProv.GetLocalDeal(propCid) + var d legacytypes.MinerDeal + err := m.legacyFSM.Get(propCid).Get(&d) if err == nil { - dls = append(dls, dl) + dls = append(dls, d) continue } @@ -188,3 +205,62 @@ func (m *LegacyDealsManager) byPropCids(propCids []cid.Cid) ([]gfm_storagemarket return dls, nil } + +func (m *legacyDealsManager) ListDeals() ([]legacytypes.MinerDeal, error) { + var list []legacytypes.MinerDeal + if err := m.legacyFSM.List(&list); err != nil { + return nil, err + } + return list, nil +} + +// Get deal by deal signed proposal cid +func (m *legacyDealsManager) ByPropCid(propCid cid.Cid) (legacytypes.MinerDeal, error) { + var d legacytypes.MinerDeal + err := m.legacyFSM.Get(propCid).Get(&d) + if err != nil { + return legacytypes.MinerDeal{}, err + } + return d, nil +} + +func (m *legacyDealsManager) ListLocalDealsPage(startPropCid *cid.Cid, offset int, limit int) ([]legacytypes.MinerDeal, error) { + if limit == 0 { + return []legacytypes.MinerDeal{}, nil + } + + // Get all deals + var deals []legacytypes.MinerDeal + if err := m.legacyFSM.List(&deals); err != nil { + return nil, err + } + + // Sort by creation time descending + sort.Slice(deals, func(i, j int) bool { + return deals[i].CreationTime.Time().After(deals[j].CreationTime.Time()) + }) + + // Iterate through deals until we reach the target signed proposal cid, + // find the offset from there, then add deals from that point up to limit + page := make([]legacytypes.MinerDeal, 0, limit) + startIndex := -1 + if startPropCid == nil { + startIndex = 0 + } + for i, dl := range deals { + // Find the deal with a proposal cid matching startPropCid + if startPropCid != nil && dl.ProposalCid == *startPropCid { + // Start adding deals from offset after the first matching deal + startIndex = i + offset + } + + if startIndex >= 0 && i >= startIndex { + page = append(page, dl) + } + if len(page) == limit { + return page, nil + } + } + + return page, nil +} diff --git a/lib/legacy/mocks/legacy_manager_mock.go b/lib/legacy/mocks/legacy_manager_mock.go new file mode 100644 index 000000000..d61c5adbd --- /dev/null +++ b/lib/legacy/mocks/legacy_manager_mock.go @@ -0,0 +1,154 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/filecoin-project/boost/lib/legacy (interfaces: LegacyDealManager) + +// Package mock_legacy is a generated GoMock package. +package mock_legacy + +import ( + context "context" + reflect "reflect" + + legacytypes "github.com/filecoin-project/boost/storagemarket/types/legacytypes" + gomock "github.com/golang/mock/gomock" + cid "github.com/ipfs/go-cid" +) + +// MockLegacyDealManager is a mock of LegacyDealManager interface. 
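// Illustrative sketch: how a caller could walk the full legacy deal list with
// ListLocalDealsPage, assuming a value m that satisfies the LegacyDealManager
// interface introduced above. The helper name and page size are illustrative
// only. Deals come back newest-first; resuming a page means passing the
// previous page's last proposal CID with offset 1 so the cursor deal itself
// is skipped.
func listAllLegacyDeals(m LegacyDealManager) ([]legacytypes.MinerDeal, error) {
	const pageSize = 100
	var all []legacytypes.MinerDeal
	page, err := m.ListLocalDealsPage(nil, 0, pageSize) // first page
	for err == nil && len(page) > 0 {
		all = append(all, page...)
		if len(page) < pageSize {
			break // short page means we reached the end
		}
		last := page[len(page)-1].ProposalCid
		page, err = m.ListLocalDealsPage(&last, 1, pageSize) // next page
	}
	return all, err
}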
+type MockLegacyDealManager struct { + ctrl *gomock.Controller + recorder *MockLegacyDealManagerMockRecorder +} + +// MockLegacyDealManagerMockRecorder is the mock recorder for MockLegacyDealManager. +type MockLegacyDealManagerMockRecorder struct { + mock *MockLegacyDealManager +} + +// NewMockLegacyDealManager creates a new mock instance. +func NewMockLegacyDealManager(ctrl *gomock.Controller) *MockLegacyDealManager { + mock := &MockLegacyDealManager{ctrl: ctrl} + mock.recorder = &MockLegacyDealManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockLegacyDealManager) EXPECT() *MockLegacyDealManagerMockRecorder { + return m.recorder +} + +// ByPayloadCid mocks base method. +func (m *MockLegacyDealManager) ByPayloadCid(arg0 context.Context, arg1 cid.Cid) ([]legacytypes.MinerDeal, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ByPayloadCid", arg0, arg1) + ret0, _ := ret[0].([]legacytypes.MinerDeal) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ByPayloadCid indicates an expected call of ByPayloadCid. +func (mr *MockLegacyDealManagerMockRecorder) ByPayloadCid(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ByPayloadCid", reflect.TypeOf((*MockLegacyDealManager)(nil).ByPayloadCid), arg0, arg1) +} + +// ByPieceCid mocks base method. +func (m *MockLegacyDealManager) ByPieceCid(arg0 context.Context, arg1 cid.Cid) ([]legacytypes.MinerDeal, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ByPieceCid", arg0, arg1) + ret0, _ := ret[0].([]legacytypes.MinerDeal) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ByPieceCid indicates an expected call of ByPieceCid. +func (mr *MockLegacyDealManagerMockRecorder) ByPieceCid(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ByPieceCid", reflect.TypeOf((*MockLegacyDealManager)(nil).ByPieceCid), arg0, arg1) +} + +// ByPropCid mocks base method. +func (m *MockLegacyDealManager) ByPropCid(arg0 cid.Cid) (legacytypes.MinerDeal, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ByPropCid", arg0) + ret0, _ := ret[0].(legacytypes.MinerDeal) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ByPropCid indicates an expected call of ByPropCid. +func (mr *MockLegacyDealManagerMockRecorder) ByPropCid(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ByPropCid", reflect.TypeOf((*MockLegacyDealManager)(nil).ByPropCid), arg0) +} + +// ByPublishCid mocks base method. +func (m *MockLegacyDealManager) ByPublishCid(arg0 context.Context, arg1 cid.Cid) ([]legacytypes.MinerDeal, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ByPublishCid", arg0, arg1) + ret0, _ := ret[0].([]legacytypes.MinerDeal) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ByPublishCid indicates an expected call of ByPublishCid. +func (mr *MockLegacyDealManagerMockRecorder) ByPublishCid(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ByPublishCid", reflect.TypeOf((*MockLegacyDealManager)(nil).ByPublishCid), arg0, arg1) +} + +// DealCount mocks base method. 
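// Illustrative sketch: standing this generated mock in for the real legacy
// deal manager inside a test, mirroring the wrapper_test.go usage above. The
// helper name is illustrative and the testing/gomock imports are assumed.
func newEmptyLegacyDealManager(t *testing.T) *MockLegacyDealManager {
	ctrl := gomock.NewController(t)
	m := NewMockLegacyDealManager(ctrl)
	// An empty deal list makes callers behave as though no legacy
	// (markets v1) deals exist.
	m.EXPECT().ListDeals().Return(nil, nil).AnyTimes()
	return m
}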
+func (m *MockLegacyDealManager) DealCount(arg0 context.Context) (int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DealCount", arg0) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DealCount indicates an expected call of DealCount. +func (mr *MockLegacyDealManagerMockRecorder) DealCount(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DealCount", reflect.TypeOf((*MockLegacyDealManager)(nil).DealCount), arg0) +} + +// ListDeals mocks base method. +func (m *MockLegacyDealManager) ListDeals() ([]legacytypes.MinerDeal, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListDeals") + ret0, _ := ret[0].([]legacytypes.MinerDeal) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListDeals indicates an expected call of ListDeals. +func (mr *MockLegacyDealManagerMockRecorder) ListDeals() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListDeals", reflect.TypeOf((*MockLegacyDealManager)(nil).ListDeals)) +} + +// ListLocalDealsPage mocks base method. +func (m *MockLegacyDealManager) ListLocalDealsPage(arg0 *cid.Cid, arg1, arg2 int) ([]legacytypes.MinerDeal, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListLocalDealsPage", arg0, arg1, arg2) + ret0, _ := ret[0].([]legacytypes.MinerDeal) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListLocalDealsPage indicates an expected call of ListLocalDealsPage. +func (mr *MockLegacyDealManagerMockRecorder) ListLocalDealsPage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListLocalDealsPage", reflect.TypeOf((*MockLegacyDealManager)(nil).ListLocalDealsPage), arg0, arg1, arg2) +} + +// Run mocks base method. +func (m *MockLegacyDealManager) Run(arg0 context.Context) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Run", arg0) +} + +// Run indicates an expected call of Run. +func (mr *MockLegacyDealManagerMockRecorder) Run(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockLegacyDealManager)(nil).Run), arg0) +} diff --git a/markets/journal.go b/markets/journal.go deleted file mode 100644 index 8de216f2a..000000000 --- a/markets/journal.go +++ /dev/null @@ -1,76 +0,0 @@ -package markets - -import ( - "github.com/filecoin-project/boost-gfm/retrievalmarket" - "github.com/filecoin-project/boost-gfm/storagemarket" - - "github.com/filecoin-project/lotus/journal" -) - -type StorageClientEvt struct { - Event string - Deal storagemarket.ClientDeal -} - -type StorageProviderEvt struct { - Event string - Deal storagemarket.MinerDeal -} - -type RetrievalClientEvt struct { - Event string - Deal retrievalmarket.ClientDealState -} - -type RetrievalProviderEvt struct { - Event string - Deal retrievalmarket.ProviderDealState -} - -// StorageClientJournaler records journal events from the storage client. -func StorageClientJournaler(j journal.Journal, evtType journal.EventType) func(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) { - return func(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) { - j.RecordEvent(evtType, func() interface{} { - return StorageClientEvt{ - Event: storagemarket.ClientEvents[event], - Deal: deal, - } - }) - } -} - -// StorageProviderJournaler records journal events from the storage provider. 
-func StorageProviderJournaler(j journal.Journal, evtType journal.EventType) func(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) { - return func(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) { - j.RecordEvent(evtType, func() interface{} { - return StorageProviderEvt{ - Event: storagemarket.ProviderEvents[event], - Deal: deal, - } - }) - } -} - -// RetrievalClientJournaler records journal events from the retrieval client. -func RetrievalClientJournaler(j journal.Journal, evtType journal.EventType) func(event retrievalmarket.ClientEvent, deal retrievalmarket.ClientDealState) { - return func(event retrievalmarket.ClientEvent, deal retrievalmarket.ClientDealState) { - j.RecordEvent(evtType, func() interface{} { - return RetrievalClientEvt{ - Event: retrievalmarket.ClientEvents[event], - Deal: deal, - } - }) - } -} - -// RetrievalProviderJournaler records journal events from the retrieval provider. -func RetrievalProviderJournaler(j journal.Journal, evtType journal.EventType) func(event retrievalmarket.ProviderEvent, deal retrievalmarket.ProviderDealState) { - return func(event retrievalmarket.ProviderEvent, deal retrievalmarket.ProviderDealState) { - j.RecordEvent(evtType, func() interface{} { - return RetrievalProviderEvt{ - Event: retrievalmarket.ProviderEvents[event], - Deal: deal, - } - }) - } -} diff --git a/markets/loggers/loggers.go b/markets/loggers/loggers.go index 8cdf13402..e765d65f7 100644 --- a/markets/loggers/loggers.go +++ b/markets/loggers/loggers.go @@ -1,49 +1,38 @@ package marketevents import ( + datatransfer2 "github.com/filecoin-project/boost/datatransfer" logging "github.com/ipfs/go-log/v2" - "github.com/filecoin-project/boost-gfm/retrievalmarket" - "github.com/filecoin-project/boost-gfm/storagemarket" - datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" "github.com/filecoin-project/go-state-types/abi" ) var log = logging.Logger("markets") -// StorageClientLogger logs events from the storage client -func StorageClientLogger(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) { - log.Infow("storage client event", "name", storagemarket.ClientEvents[event], "proposal CID", deal.ProposalCid, "state", storagemarket.DealStates[deal.State], "message", deal.Message) -} - -// StorageProviderLogger logs events from the storage provider -func StorageProviderLogger(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) { - log.Infow("storage provider event", "name", storagemarket.ProviderEvents[event], "proposal CID", deal.ProposalCid, "state", storagemarket.DealStates[deal.State], "message", deal.Message) -} - // RetrievalClientLogger logs events from the retrieval client -func RetrievalClientLogger(event retrievalmarket.ClientEvent, deal retrievalmarket.ClientDealState) { +func RetrievalClientLogger(event legacyretrievaltypes.ClientEvent, deal legacyretrievaltypes.ClientDealState) { method := log.Infow - if event == retrievalmarket.ClientEventBlocksReceived { + if event == legacyretrievaltypes.ClientEventBlocksReceived { method = log.Debugw } - method("retrieval client event", "name", retrievalmarket.ClientEvents[event], "deal ID", deal.ID, "state", retrievalmarket.DealStatuses[deal.Status], "message", deal.Message) + method("retrieval client event", "name", legacyretrievaltypes.ClientEvents[event], "deal ID", deal.ID, "state", legacyretrievaltypes.DealStatuses[deal.Status], "message", deal.Message) } // RetrievalProviderLogger logs 
events from the retrieval provider -func RetrievalProviderLogger(event retrievalmarket.ProviderEvent, deal retrievalmarket.ProviderDealState) { +func RetrievalProviderLogger(event legacyretrievaltypes.ProviderEvent, deal legacyretrievaltypes.ProviderDealState) { method := log.Infow - if event == retrievalmarket.ProviderEventBlockSent { + if event == legacyretrievaltypes.ProviderEventBlockSent { method = log.Debugw } - method("retrieval provider event", "name", retrievalmarket.ProviderEvents[event], "deal ID", deal.ID, "receiver", deal.Receiver, "state", retrievalmarket.DealStatuses[deal.Status], "message", deal.Message) + method("retrieval provider event", "name", legacyretrievaltypes.ProviderEvents[event], "deal ID", deal.ID, "receiver", deal.Receiver, "state", legacyretrievaltypes.DealStatuses[deal.Status], "message", deal.Message) } // DataTransferLogger logs events from the data transfer module -func DataTransferLogger(event datatransfer.Event, state datatransfer.ChannelState) { +func DataTransferLogger(event datatransfer2.Event, state datatransfer2.ChannelState) { log.Debugw("data transfer event", - "name", datatransfer.Events[event.Code], - "status", datatransfer.Statuses[state.Status()], + "name", datatransfer2.Events[event.Code], + "status", datatransfer2.Statuses[state.Status()], "transfer ID", state.TransferID(), "channel ID", state.ChannelID(), "sent", state.Sent(), @@ -68,8 +57,8 @@ func ReadyLogger(module string) func(error) { } type RetrievalEvent struct { - Event retrievalmarket.ClientEvent - Status retrievalmarket.DealStatus + Event legacyretrievaltypes.ClientEvent + Status legacyretrievaltypes.DealStatus BytesReceived uint64 FundsSpent abi.TokenAmount Err string diff --git a/markets/piecestore/impl/piecestore.go b/markets/piecestore/impl/piecestore.go new file mode 100644 index 000000000..74d9f2eae --- /dev/null +++ b/markets/piecestore/impl/piecestore.go @@ -0,0 +1,214 @@ +package piecestoreimpl + +import ( + "context" + + "github.com/filecoin-project/boost/markets/piecestore" + "github.com/filecoin-project/boost/markets/piecestore/migrations" + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" + "github.com/hannahhoward/go-pubsub" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + logging "github.com/ipfs/go-log/v2" + "golang.org/x/xerrors" + + versioning "github.com/filecoin-project/go-ds-versioning/pkg" + versioned "github.com/filecoin-project/go-ds-versioning/pkg/statestore" + + "github.com/filecoin-project/boost/markets/shared" +) + +var log = logging.Logger("piecestore") + +// DSPiecePrefix is the name space for storing piece infos +var DSPiecePrefix = "/pieces" + +// DSCIDPrefix is the name space for storing CID infos +var DSCIDPrefix = "/cid-infos" + +// NewPieceStore returns a new piecestore based on the given datastore +func NewPieceStore(ds datastore.Batching) (*pieceStore, error) { + pieceInfoMigrations, err := migrations.PieceInfoMigrations.Build() + if err != nil { + return nil, err + } + pieces, migratePieces := versioned.NewVersionedStateStore(namespace.Wrap(ds, datastore.NewKey(DSPiecePrefix)), pieceInfoMigrations, versioning.VersionKey("1")) + cidInfoMigrations, err := migrations.CIDInfoMigrations.Build() + if err != nil { + return nil, err + } + cidInfos, migrateCidInfos := versioned.NewVersionedStateStore(namespace.Wrap(ds, datastore.NewKey(DSCIDPrefix)), cidInfoMigrations, versioning.VersionKey("1")) + return &pieceStore{ + readySub: pubsub.New(shared.ReadyDispatcher), 
+ pieces: pieces, + migratePieces: migratePieces, + cidInfos: cidInfos, + migrateCidInfos: migrateCidInfos, + }, nil +} + +type pieceStore struct { + readySub *pubsub.PubSub + migratePieces func(ctx context.Context) error + pieces versioned.StateStore + migrateCidInfos func(ctx context.Context) error + cidInfos versioned.StateStore +} + +func (ps *pieceStore) Start(ctx context.Context) error { + go func() { + var err error + defer func() { + err = ps.readySub.Publish(err) + if err != nil { + log.Warnf("Publish piecestore migration ready event: %s", err.Error()) + } + }() + err = ps.migratePieces(ctx) + if err != nil { + log.Errorf("Migrating pieceInfos: %s", err.Error()) + return + } + err = ps.migrateCidInfos(ctx) + if err != nil { + log.Errorf("Migrating cidInfos: %s", err.Error()) + } + }() + return nil +} + +func (ps *pieceStore) OnReady(ready shared.ReadyFunc) { + ps.readySub.Subscribe(ready) +} + +// Store `dealInfo` in the PieceStore with key `pieceCID`. +func (ps *pieceStore) AddDealForPiece(pieceCID cid.Cid, _ cid.Cid, dealInfo piecestore.DealInfo) error { + return ps.mutatePieceInfo(pieceCID, func(pi *piecestore.PieceInfo) error { + for _, di := range pi.Deals { + if di == dealInfo { + return nil + } + } + pi.Deals = append(pi.Deals, dealInfo) + return nil + }) +} + +// Store the map of blockLocations in the PieceStore's CIDInfo store, with key `pieceCID` +func (ps *pieceStore) AddPieceBlockLocations(pieceCID cid.Cid, blockLocations map[cid.Cid]piecestore.BlockLocation) error { + for c, blockLocation := range blockLocations { + err := ps.mutateCIDInfo(c, func(ci *piecestore.CIDInfo) error { + for _, pbl := range ci.PieceBlockLocations { + if pbl.PieceCID.Equals(pieceCID) && pbl.BlockLocation == blockLocation { + return nil + } + } + ci.PieceBlockLocations = append(ci.PieceBlockLocations, piecestore.PieceBlockLocation{BlockLocation: blockLocation, PieceCID: pieceCID}) + return nil + }) + if err != nil { + return err + } + } + return nil +} + +func (ps *pieceStore) ListPieceInfoKeys() ([]cid.Cid, error) { + var pis []piecestore.PieceInfo + if err := ps.pieces.List(&pis); err != nil { + return nil, err + } + + out := make([]cid.Cid, 0, len(pis)) + for _, pi := range pis { + out = append(out, pi.PieceCID) + } + + return out, nil +} + +func (ps *pieceStore) ListCidInfoKeys() ([]cid.Cid, error) { + var cis []piecestore.CIDInfo + if err := ps.cidInfos.List(&cis); err != nil { + return nil, err + } + + out := make([]cid.Cid, 0, len(cis)) + for _, ci := range cis { + out = append(out, ci.CID) + } + + return out, nil +} + +// Retrieve the PieceInfo associated with `pieceCID` from the piece info store. +func (ps *pieceStore) GetPieceInfo(pieceCID cid.Cid) (piecestore.PieceInfo, error) { + var out piecestore.PieceInfo + if err := ps.pieces.Get(pieceCID).Get(&out); err != nil { + if xerrors.Is(err, datastore.ErrNotFound) { + return piecestore.PieceInfo{}, xerrors.Errorf("piece with CID %s: %w", pieceCID, legacyretrievaltypes.ErrNotFound) + } + return piecestore.PieceInfo{}, err + } + return out, nil +} + +// Retrieve the CIDInfo associated with `pieceCID` from the CID info store. 
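// Illustrative sketch of typical piecestore wiring: construct the store over
// a Batching datastore, run the version migrations via Start, wait for
// OnReady, then record and read back deal info. Function and variable names
// are illustrative; the context, datastore, cid, abi and piecestore imports
// are assumed to match the ones already used in this file.
func examplePieceStoreUsage(ctx context.Context, ds datastore.Batching, pieceCID, payloadCID cid.Cid) error {
	ps, err := NewPieceStore(ds)
	if err != nil {
		return err
	}
	ready := make(chan error, 1)
	ps.OnReady(func(err error) { ready <- err })
	if err := ps.Start(ctx); err != nil {
		return err
	}
	if err := <-ready; err != nil { // migrations finished
		return err
	}
	err = ps.AddDealForPiece(pieceCID, payloadCID, piecestore.DealInfo{
		DealID:   abi.DealID(1),
		SectorID: abi.SectorNumber(7),
		Offset:   abi.PaddedPieceSize(0),
		Length:   abi.PaddedPieceSize(32 << 30),
	})
	if err != nil {
		return err
	}
	// The returned PieceInfo now includes the DealInfo added above.
	_, err = ps.GetPieceInfo(pieceCID)
	return err
}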
+func (ps *pieceStore) GetCIDInfo(payloadCID cid.Cid) (piecestore.CIDInfo, error) { + var out piecestore.CIDInfo + if err := ps.cidInfos.Get(payloadCID).Get(&out); err != nil { + if xerrors.Is(err, datastore.ErrNotFound) { + return piecestore.CIDInfo{}, xerrors.Errorf("payload CID %s: %w", payloadCID, legacyretrievaltypes.ErrNotFound) + } + return piecestore.CIDInfo{}, err + } + return out, nil +} + +func (ps *pieceStore) ensurePieceInfo(pieceCID cid.Cid) error { + has, err := ps.pieces.Has(pieceCID) + + if err != nil { + return err + } + if has { + return nil + } + + pieceInfo := piecestore.PieceInfo{PieceCID: pieceCID} + return ps.pieces.Begin(pieceCID, &pieceInfo) +} + +func (ps *pieceStore) ensureCIDInfo(c cid.Cid) error { + has, err := ps.cidInfos.Has(c) + + if err != nil { + return err + } + + if has { + return nil + } + + cidInfo := piecestore.CIDInfo{CID: c} + return ps.cidInfos.Begin(c, &cidInfo) +} + +func (ps *pieceStore) mutatePieceInfo(pieceCID cid.Cid, mutator interface{}) error { + err := ps.ensurePieceInfo(pieceCID) + if err != nil { + return err + } + + return ps.pieces.Get(pieceCID).Mutate(mutator) +} + +func (ps *pieceStore) mutateCIDInfo(c cid.Cid, mutator interface{}) error { + err := ps.ensureCIDInfo(c) + if err != nil { + return err + } + + return ps.cidInfos.Get(c).Mutate(mutator) +} diff --git a/markets/piecestore/migrations/migrations.go b/markets/piecestore/migrations/migrations.go new file mode 100644 index 000000000..b3ec99f45 --- /dev/null +++ b/markets/piecestore/migrations/migrations.go @@ -0,0 +1,90 @@ +package migrations + +import ( + "github.com/filecoin-project/boost/markets/piecestore" + "github.com/ipfs/go-cid" + + versioning "github.com/filecoin-project/go-ds-versioning/pkg" + "github.com/filecoin-project/go-ds-versioning/pkg/versioned" + "github.com/filecoin-project/go-state-types/abi" +) + +//go:generate cbor-gen-for PieceInfo0 DealInfo0 BlockLocation0 PieceBlockLocation0 CIDInfo0 + +// DealInfo0 is version 0 of DealInfo +type DealInfo0 struct { + DealID abi.DealID + SectorID abi.SectorNumber + Offset abi.PaddedPieceSize + Length abi.PaddedPieceSize +} + +// BlockLocation0 is version 0 of BlockLocation +type BlockLocation0 struct { + RelOffset uint64 + BlockSize uint64 +} + +// PieceBlockLocation0 is version 0 of PieceBlockLocation +// is inside of +type PieceBlockLocation0 struct { + BlockLocation0 + PieceCID cid.Cid +} + +// CIDInfo0 is version 0 of CIDInfo +type CIDInfo0 struct { + CID cid.Cid + PieceBlockLocations []PieceBlockLocation0 +} + +// PieceInfo0 is version 0 of PieceInfo +type PieceInfo0 struct { + PieceCID cid.Cid + Deals []DealInfo0 +} + +// MigratePieceInfo0To1 migrates a tuple encoded piece info to a map encoded piece info +func MigratePieceInfo0To1(oldPi *PieceInfo0) (*piecestore.PieceInfo, error) { + deals := make([]piecestore.DealInfo, len(oldPi.Deals)) + for i, oldDi := range oldPi.Deals { + deals[i] = piecestore.DealInfo{ + DealID: oldDi.DealID, + SectorID: oldDi.SectorID, + Offset: oldDi.Offset, + Length: oldDi.Length, + } + } + return &piecestore.PieceInfo{ + PieceCID: oldPi.PieceCID, + Deals: deals, + }, nil +} + +// MigrateCidInfo0To1 migrates a tuple encoded cid info to a map encoded cid info +func MigrateCidInfo0To1(oldCi *CIDInfo0) (*piecestore.CIDInfo, error) { + pieceBlockLocations := make([]piecestore.PieceBlockLocation, len(oldCi.PieceBlockLocations)) + for i, oldPbl := range oldCi.PieceBlockLocations { + pieceBlockLocations[i] = piecestore.PieceBlockLocation{ + BlockLocation: piecestore.BlockLocation{ + RelOffset: 
oldPbl.RelOffset, + BlockSize: oldPbl.BlockSize, + }, + PieceCID: oldPbl.PieceCID, + } + } + return &piecestore.CIDInfo{ + CID: oldCi.CID, + PieceBlockLocations: pieceBlockLocations, + }, nil +} + +// PieceInfoMigrations is the list of migrations for migrating PieceInfos +var PieceInfoMigrations = versioned.BuilderList{ + versioned.NewVersionedBuilder(MigratePieceInfo0To1, versioning.VersionKey("1")), +} + +// CIDInfoMigrations is the list of migrations for migrating CIDInfos +var CIDInfoMigrations = versioned.BuilderList{ + versioned.NewVersionedBuilder(MigrateCidInfo0To1, versioning.VersionKey("1")), +} diff --git a/markets/piecestore/migrations/migrations_cbor_gen.go b/markets/piecestore/migrations/migrations_cbor_gen.go new file mode 100644 index 000000000..76f5bda29 --- /dev/null +++ b/markets/piecestore/migrations/migrations_cbor_gen.go @@ -0,0 +1,507 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + +package migrations + +import ( + "fmt" + "io" + "math" + "sort" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +var lengthBufPieceInfo0 = []byte{130} + +func (t *PieceInfo0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufPieceInfo0); err != nil { + return err + } + + // t.PieceCID (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.PieceCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) + } + + // t.Deals ([]migrations.DealInfo0) (slice) + if len(t.Deals) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Deals was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Deals))); err != nil { + return err + } + for _, v := range t.Deals { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + return nil +} + +func (t *PieceInfo0) UnmarshalCBOR(r io.Reader) (err error) { + *t = PieceInfo0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.PieceCID (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } + + t.PieceCID = c + + } + // t.Deals ([]migrations.DealInfo0) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Deals: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Deals = make([]DealInfo0, extra) + } + + for i := 0; i < int(extra); i++ { + + var v DealInfo0 + if err := v.UnmarshalCBOR(cr); err != nil { + return err + } + + t.Deals[i] = v + } + + return nil +} + +var lengthBufDealInfo0 = []byte{132} + +func (t *DealInfo0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufDealInfo0); err != nil { + return err + } + + // t.DealID (abi.DealID) (uint64) + + if err := 
cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.SectorID (abi.SectorNumber) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SectorID)); err != nil { + return err + } + + // t.Offset (abi.PaddedPieceSize) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Offset)); err != nil { + return err + } + + // t.Length (abi.PaddedPieceSize) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Length)); err != nil { + return err + } + + return nil +} + +func (t *DealInfo0) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealInfo0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 4 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.DealID (abi.DealID) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.SectorID (abi.SectorNumber) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SectorID = abi.SectorNumber(extra) + + } + // t.Offset (abi.PaddedPieceSize) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Offset = abi.PaddedPieceSize(extra) + + } + // t.Length (abi.PaddedPieceSize) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Length = abi.PaddedPieceSize(extra) + + } + return nil +} + +var lengthBufBlockLocation0 = []byte{130} + +func (t *BlockLocation0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufBlockLocation0); err != nil { + return err + } + + // t.RelOffset (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.RelOffset)); err != nil { + return err + } + + // t.BlockSize (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.BlockSize)); err != nil { + return err + } + + return nil +} + +func (t *BlockLocation0) UnmarshalCBOR(r io.Reader) (err error) { + *t = BlockLocation0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.RelOffset (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.RelOffset = uint64(extra) + + } + // t.BlockSize (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.BlockSize = 
uint64(extra) + + } + return nil +} + +var lengthBufPieceBlockLocation0 = []byte{130} + +func (t *PieceBlockLocation0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufPieceBlockLocation0); err != nil { + return err + } + + // t.BlockLocation0 (migrations.BlockLocation0) (struct) + if err := t.BlockLocation0.MarshalCBOR(cw); err != nil { + return err + } + + // t.PieceCID (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.PieceCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) + } + + return nil +} + +func (t *PieceBlockLocation0) UnmarshalCBOR(r io.Reader) (err error) { + *t = PieceBlockLocation0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.BlockLocation0 (migrations.BlockLocation0) (struct) + + { + + if err := t.BlockLocation0.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.BlockLocation0: %w", err) + } + + } + // t.PieceCID (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } + + t.PieceCID = c + + } + return nil +} + +var lengthBufCIDInfo0 = []byte{130} + +func (t *CIDInfo0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufCIDInfo0); err != nil { + return err + } + + // t.CID (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.CID); err != nil { + return xerrors.Errorf("failed to write cid field t.CID: %w", err) + } + + // t.PieceBlockLocations ([]migrations.PieceBlockLocation0) (slice) + if len(t.PieceBlockLocations) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.PieceBlockLocations was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.PieceBlockLocations))); err != nil { + return err + } + for _, v := range t.PieceBlockLocations { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + return nil +} + +func (t *CIDInfo0) UnmarshalCBOR(r io.Reader) (err error) { + *t = CIDInfo0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.CID (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.CID: %w", err) + } + + t.CID = c + + } + // t.PieceBlockLocations ([]migrations.PieceBlockLocation0) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.PieceBlockLocations: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.PieceBlockLocations = make([]PieceBlockLocation0, extra) + } + + for i := 0; i < int(extra); i++ { + + var v PieceBlockLocation0 + if err := v.UnmarshalCBOR(cr); err != nil { + 
return err + } + + t.PieceBlockLocations[i] = v + } + + return nil +} diff --git a/markets/piecestore/types.go b/markets/piecestore/types.go new file mode 100644 index 000000000..b935098a7 --- /dev/null +++ b/markets/piecestore/types.go @@ -0,0 +1,70 @@ +package piecestore + +import ( + "context" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/boost/markets/shared" +) + +//go:generate cbor-gen-for --map-encoding PieceInfo DealInfo BlockLocation PieceBlockLocation CIDInfo + +// DealInfo is information about a single deal for a given piece +type DealInfo struct { + DealID abi.DealID + SectorID abi.SectorNumber + Offset abi.PaddedPieceSize + Length abi.PaddedPieceSize +} + +// BlockLocation is information about where a given block is relative to the overall piece +type BlockLocation struct { + RelOffset uint64 + BlockSize uint64 +} + +// PieceBlockLocation is block information along with the pieceCID of the piece the block +// is inside of +type PieceBlockLocation struct { + BlockLocation + PieceCID cid.Cid +} + +// CIDInfo is information about where a given CID will live inside a piece +type CIDInfo struct { + CID cid.Cid + PieceBlockLocations []PieceBlockLocation +} + +// CIDInfoUndefined is cid info with no information +var CIDInfoUndefined = CIDInfo{} + +// PieceInfo is metadata about a piece a provider may be storing based +// on its PieceCID -- so that, given a pieceCID during retrieval, the miner +// can determine how to unseal it if needed +type PieceInfo struct { + PieceCID cid.Cid + Deals []DealInfo +} + +// PieceInfoUndefined is piece info with no information +var PieceInfoUndefined = PieceInfo{} + +func (pi PieceInfo) Defined() bool { + return pi.PieceCID.Defined() || len(pi.Deals) > 0 +} + +// PieceStore is a saved database of piece info that can be modified and queried +type PieceStore interface { + Start(ctx context.Context) error + OnReady(ready shared.ReadyFunc) + AddDealForPiece(pieceCID cid.Cid, payloadCid cid.Cid, dealInfo DealInfo) error + AddPieceBlockLocations(pieceCID cid.Cid, blockLocations map[cid.Cid]BlockLocation) error + GetPieceInfo(pieceCID cid.Cid) (PieceInfo, error) + GetCIDInfo(payloadCID cid.Cid) (CIDInfo, error) + ListCidInfoKeys() ([]cid.Cid, error) + ListPieceInfoKeys() ([]cid.Cid, error) +} diff --git a/markets/piecestore/types_cbor_gen.go b/markets/piecestore/types_cbor_gen.go new file mode 100644 index 000000000..fd74ae523 --- /dev/null +++ b/markets/piecestore/types_cbor_gen.go @@ -0,0 +1,737 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
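// Illustrative sketch of how the generated map-encoded marshalers below are
// exercised: the versioned statestore persists piecestore records by
// round-tripping them through CBOR. The function name is illustrative and
// the bytes import is assumed.
func roundTripPieceInfo(pi PieceInfo) (PieceInfo, error) {
	var buf bytes.Buffer
	if err := pi.MarshalCBOR(&buf); err != nil {
		return PieceInfo{}, err
	}
	var out PieceInfo
	if err := out.UnmarshalCBOR(&buf); err != nil {
		return PieceInfo{}, err
	}
	return out, nil
}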
+ +package piecestore + +import ( + "fmt" + "io" + "math" + "sort" + + abi "github.com/filecoin-project/go-state-types/abi" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +func (t *PieceInfo) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.Deals ([]piecestore.DealInfo) (slice) + if len("Deals") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Deals\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Deals"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Deals")); err != nil { + return err + } + + if len(t.Deals) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Deals was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Deals))); err != nil { + return err + } + for _, v := range t.Deals { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + + // t.PieceCID (cid.Cid) (struct) + if len("PieceCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceCID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PieceCID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PieceCID")); err != nil { + return err + } + + if err := cbg.WriteCid(cw, t.PieceCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) + } + + return nil +} + +func (t *PieceInfo) UnmarshalCBOR(r io.Reader) (err error) { + *t = PieceInfo{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("PieceInfo: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Deals ([]piecestore.DealInfo) (slice) + case "Deals": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Deals: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Deals = make([]DealInfo, extra) + } + + for i := 0; i < int(extra); i++ { + + var v DealInfo + if err := v.UnmarshalCBOR(cr); err != nil { + return err + } + + t.Deals[i] = v + } + + // t.PieceCID (cid.Cid) (struct) + case "PieceCID": + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } + + t.PieceCID = c + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DealInfo) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{164}); err != nil { + return err + } + + // t.DealID (abi.DealID) (uint64) + if len("DealID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealID\" was too long") + } + + if err := 
cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.Length (abi.PaddedPieceSize) (uint64) + if len("Length") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Length\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Length"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Length")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Length)); err != nil { + return err + } + + // t.Offset (abi.PaddedPieceSize) (uint64) + if len("Offset") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Offset\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Offset"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Offset")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Offset)); err != nil { + return err + } + + // t.SectorID (abi.SectorNumber) (uint64) + if len("SectorID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SectorID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SectorID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("SectorID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SectorID)); err != nil { + return err + } + + return nil +} + +func (t *DealInfo) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealInfo{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealInfo: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.DealID (abi.DealID) (uint64) + case "DealID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.Length (abi.PaddedPieceSize) (uint64) + case "Length": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Length = abi.PaddedPieceSize(extra) + + } + // t.Offset (abi.PaddedPieceSize) (uint64) + case "Offset": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Offset = abi.PaddedPieceSize(extra) + + } + // t.SectorID (abi.SectorNumber) (uint64) + case "SectorID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SectorID = abi.SectorNumber(extra) + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *BlockLocation) 
MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.BlockSize (uint64) (uint64) + if len("BlockSize") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"BlockSize\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("BlockSize"))); err != nil { + return err + } + if _, err := cw.WriteString(string("BlockSize")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.BlockSize)); err != nil { + return err + } + + // t.RelOffset (uint64) (uint64) + if len("RelOffset") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"RelOffset\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("RelOffset"))); err != nil { + return err + } + if _, err := cw.WriteString(string("RelOffset")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.RelOffset)); err != nil { + return err + } + + return nil +} + +func (t *BlockLocation) UnmarshalCBOR(r io.Reader) (err error) { + *t = BlockLocation{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("BlockLocation: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.BlockSize (uint64) (uint64) + case "BlockSize": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.BlockSize = uint64(extra) + + } + // t.RelOffset (uint64) (uint64) + case "RelOffset": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.RelOffset = uint64(extra) + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *PieceBlockLocation) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.PieceCID (cid.Cid) (struct) + if len("PieceCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceCID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PieceCID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PieceCID")); err != nil { + return err + } + + if err := cbg.WriteCid(cw, t.PieceCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) + } + + // t.BlockLocation (piecestore.BlockLocation) (struct) + if len("BlockLocation") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"BlockLocation\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("BlockLocation"))); err != nil { + return err + } + if _, err := cw.WriteString(string("BlockLocation")); err != nil { + return err + } + + if err := 
t.BlockLocation.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *PieceBlockLocation) UnmarshalCBOR(r io.Reader) (err error) { + *t = PieceBlockLocation{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("PieceBlockLocation: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.PieceCID (cid.Cid) (struct) + case "PieceCID": + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } + + t.PieceCID = c + + } + // t.BlockLocation (piecestore.BlockLocation) (struct) + case "BlockLocation": + + { + + if err := t.BlockLocation.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.BlockLocation: %w", err) + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *CIDInfo) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.CID (cid.Cid) (struct) + if len("CID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("CID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("CID")); err != nil { + return err + } + + if err := cbg.WriteCid(cw, t.CID); err != nil { + return xerrors.Errorf("failed to write cid field t.CID: %w", err) + } + + // t.PieceBlockLocations ([]piecestore.PieceBlockLocation) (slice) + if len("PieceBlockLocations") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceBlockLocations\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PieceBlockLocations"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PieceBlockLocations")); err != nil { + return err + } + + if len(t.PieceBlockLocations) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.PieceBlockLocations was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.PieceBlockLocations))); err != nil { + return err + } + for _, v := range t.PieceBlockLocations { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + return nil +} + +func (t *CIDInfo) UnmarshalCBOR(r io.Reader) (err error) { + *t = CIDInfo{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("CIDInfo: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.CID (cid.Cid) (struct) + case "CID": + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read 
cid field t.CID: %w", err) + } + + t.CID = c + + } + // t.PieceBlockLocations ([]piecestore.PieceBlockLocation) (slice) + case "PieceBlockLocations": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.PieceBlockLocations: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.PieceBlockLocations = make([]PieceBlockLocation, extra) + } + + for i := 0; i < int(extra); i++ { + + var v PieceBlockLocation + if err := v.UnmarshalCBOR(cr); err != nil { + return err + } + + t.PieceBlockLocations[i] = v + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} diff --git a/markets/pricing/cli.go b/markets/pricing/cli.go deleted file mode 100644 index a3844d731..000000000 --- a/markets/pricing/cli.go +++ /dev/null @@ -1,50 +0,0 @@ -package pricing - -import ( - "bytes" - "context" - "encoding/json" - "os/exec" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/boost-gfm/retrievalmarket" - - "github.com/filecoin-project/boost/node/modules/dtypes" -) - -func ExternalRetrievalPricingFunc(cmd string) dtypes.RetrievalPricingFunc { - return func(ctx context.Context, pricingInput retrievalmarket.PricingInput) (retrievalmarket.Ask, error) { - return runPricingFunc(ctx, cmd, pricingInput) - } -} - -func runPricingFunc(_ context.Context, cmd string, params interface{}) (retrievalmarket.Ask, error) { - j, err := json.Marshal(params) - if err != nil { - return retrievalmarket.Ask{}, err - } - - var out bytes.Buffer - var errb bytes.Buffer - - c := exec.Command("sh", "-c", cmd) - c.Stdin = bytes.NewReader(j) - c.Stdout = &out - c.Stderr = &errb - - switch err := c.Run().(type) { - case nil: - bz := out.Bytes() - resp := retrievalmarket.Ask{} - - if err := json.Unmarshal(bz, &resp); err != nil { - return resp, xerrors.Errorf("failed to parse pricing output %s, err=%w", string(bz), err) - } - return resp, nil - case *exec.ExitError: - return retrievalmarket.Ask{}, xerrors.Errorf("pricing func exited with error: %s", errb.String()) - default: - return retrievalmarket.Ask{}, xerrors.Errorf("pricing func cmd run error: %w", err) - } -} diff --git a/markets/retrievaladapter/client.go b/markets/retrievaladapter/client.go deleted file mode 100644 index 5cbd9f59a..000000000 --- a/markets/retrievaladapter/client.go +++ /dev/null @@ -1,127 +0,0 @@ -package retrievaladapter - -import ( - "context" - - "github.com/ipfs/go-cid" - "github.com/multiformats/go-multiaddr" - - "github.com/filecoin-project/boost-gfm/retrievalmarket" - "github.com/filecoin-project/boost-gfm/shared" - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/impl/full" - payapi "github.com/filecoin-project/lotus/node/impl/paych" -) - -type retrievalClientNode struct { - forceOffChain bool - - chainAPI full.ChainAPI - payAPI payapi.PaychAPI - stateAPI full.StateAPI -} - -// NewRetrievalClientNode returns a new node adapter for a retrieval client that talks to the -// Lotus Node -func NewRetrievalClientNode(forceOffChain bool, payAPI payapi.PaychAPI, chainAPI full.ChainAPI, stateAPI full.StateAPI) retrievalmarket.RetrievalClientNode { - return &retrievalClientNode{ - 
forceOffChain: forceOffChain, - chainAPI: chainAPI, - payAPI: payAPI, - stateAPI: stateAPI, - } -} - -// GetOrCreatePaymentChannel sets up a new payment channel if one does not exist -// between a client and a miner and ensures the client has the given amount of -// funds available in the channel. -func (rcn *retrievalClientNode) GetOrCreatePaymentChannel(ctx context.Context, clientAddress address.Address, minerAddress address.Address, clientFundsAvailable abi.TokenAmount, tok shared.TipSetToken) (address.Address, cid.Cid, error) { - // TODO: respect the provided TipSetToken (a serialized TipSetKey) when - // querying the chain - ci, err := rcn.payAPI.PaychGet(ctx, clientAddress, minerAddress, clientFundsAvailable, api.PaychGetOpts{ - OffChain: rcn.forceOffChain, - }) - if err != nil { - log.Errorw("paych get failed", "error", err) - return address.Undef, cid.Undef, err - } - - return ci.Channel, ci.WaitSentinel, nil -} - -// Allocate late creates a lane within a payment channel so that calls to -// CreatePaymentVoucher will automatically make vouchers only for the difference -// in total -func (rcn *retrievalClientNode) AllocateLane(ctx context.Context, paymentChannel address.Address) (uint64, error) { - return rcn.payAPI.PaychAllocateLane(ctx, paymentChannel) -} - -// CreatePaymentVoucher creates a new payment voucher in the given lane for a -// given payment channel so that all the payment vouchers in the lane add up -// to the given amount (so the payment voucher will be for the difference) -func (rcn *retrievalClientNode) CreatePaymentVoucher(ctx context.Context, paymentChannel address.Address, amount abi.TokenAmount, lane uint64, tok shared.TipSetToken) (*paychtypes.SignedVoucher, error) { - // TODO: respect the provided TipSetToken (a serialized TipSetKey) when - // querying the chain - voucher, err := rcn.payAPI.PaychVoucherCreate(ctx, paymentChannel, amount, lane) - if err != nil { - return nil, err - } - if voucher.Voucher == nil { - return nil, retrievalmarket.NewShortfallError(voucher.Shortfall) - } - return voucher.Voucher, nil -} - -func (rcn *retrievalClientNode) GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) { - head, err := rcn.chainAPI.ChainHead(ctx) - if err != nil { - return nil, 0, err - } - - return head.Key().Bytes(), head.Height(), nil -} - -func (rcn *retrievalClientNode) WaitForPaymentChannelReady(ctx context.Context, messageCID cid.Cid) (address.Address, error) { - return rcn.payAPI.PaychGetWaitReady(ctx, messageCID) -} - -func (rcn *retrievalClientNode) CheckAvailableFunds(ctx context.Context, paymentChannel address.Address) (retrievalmarket.ChannelAvailableFunds, error) { - - channelAvailableFunds, err := rcn.payAPI.PaychAvailableFunds(ctx, paymentChannel) - if err != nil { - return retrievalmarket.ChannelAvailableFunds{}, err - } - return retrievalmarket.ChannelAvailableFunds{ - ConfirmedAmt: channelAvailableFunds.ConfirmedAmt, - PendingAmt: channelAvailableFunds.PendingAmt, - PendingWaitSentinel: channelAvailableFunds.PendingWaitSentinel, - QueuedAmt: channelAvailableFunds.QueuedAmt, - VoucherReedeemedAmt: channelAvailableFunds.VoucherReedeemedAmt, - }, nil -} - -func (rcn *retrievalClientNode) GetKnownAddresses(ctx context.Context, p retrievalmarket.RetrievalPeer, encodedTs shared.TipSetToken) ([]multiaddr.Multiaddr, error) { - tsk, err := types.TipSetKeyFromBytes(encodedTs) - if err != nil { - return nil, err - } - mi, err := rcn.stateAPI.StateMinerInfo(ctx, p.Address, tsk) - if err != nil { - return nil, err - } - multiaddrs 
:= make([]multiaddr.Multiaddr, 0, len(mi.Multiaddrs)) - for _, a := range mi.Multiaddrs { - maddr, err := multiaddr.NewMultiaddrBytes(a) - if err != nil { - return nil, err - } - multiaddrs = append(multiaddrs, maddr) - } - - return multiaddrs, nil -} diff --git a/markets/retrievaladapter/client_blockstore.go b/markets/retrievaladapter/client_blockstore.go deleted file mode 100644 index 409f8a03a..000000000 --- a/markets/retrievaladapter/client_blockstore.go +++ /dev/null @@ -1,83 +0,0 @@ -package retrievaladapter - -import ( - "fmt" - "path/filepath" - "sync" - - bstore "github.com/ipfs/boxo/blockstore" - "github.com/ipfs/go-cid" - "github.com/ipld/go-car/v2/blockstore" - - "github.com/filecoin-project/boost-gfm/retrievalmarket" -) - -// ProxyBlockstoreAccessor is an accessor that returns a fixed blockstore. -// To be used in combination with IPFS integration. -type ProxyBlockstoreAccessor struct { - Blockstore bstore.Blockstore -} - -var _ retrievalmarket.BlockstoreAccessor = (*ProxyBlockstoreAccessor)(nil) - -func NewFixedBlockstoreAccessor(bs bstore.Blockstore) retrievalmarket.BlockstoreAccessor { - return &ProxyBlockstoreAccessor{Blockstore: bs} -} - -func (p *ProxyBlockstoreAccessor) Get(_ retrievalmarket.DealID, _ retrievalmarket.PayloadCID) (bstore.Blockstore, error) { - return p.Blockstore, nil -} - -func (p *ProxyBlockstoreAccessor) Done(_ retrievalmarket.DealID) error { - return nil -} - -type CARBlockstoreAccessor struct { - rootdir string - lk sync.Mutex - open map[retrievalmarket.DealID]*blockstore.ReadWrite -} - -var _ retrievalmarket.BlockstoreAccessor = (*CARBlockstoreAccessor)(nil) - -func NewCARBlockstoreAccessor(rootdir string) *CARBlockstoreAccessor { - return &CARBlockstoreAccessor{ - rootdir: rootdir, - open: make(map[retrievalmarket.DealID]*blockstore.ReadWrite), - } -} - -func (c *CARBlockstoreAccessor) Get(id retrievalmarket.DealID, payloadCid retrievalmarket.PayloadCID) (bstore.Blockstore, error) { - c.lk.Lock() - defer c.lk.Unlock() - - bs, ok := c.open[id] - if ok { - return bs, nil - } - - path := c.PathFor(id) - bs, err := blockstore.OpenReadWrite(path, []cid.Cid{payloadCid}, blockstore.UseWholeCIDs(true)) - if err != nil { - return nil, err - } - c.open[id] = bs - return bs, nil -} - -func (c *CARBlockstoreAccessor) Done(id retrievalmarket.DealID) error { - c.lk.Lock() - defer c.lk.Unlock() - - bs, ok := c.open[id] - if !ok { - return nil - } - - delete(c.open, id) - return bs.Finalize() -} - -func (c *CARBlockstoreAccessor) PathFor(id retrievalmarket.DealID) string { - return filepath.Join(c.rootdir, fmt.Sprintf("%d.car", id)) -} diff --git a/markets/retrievaladapter/provider.go b/markets/retrievaladapter/provider.go deleted file mode 100644 index 7a4fc44c9..000000000 --- a/markets/retrievaladapter/provider.go +++ /dev/null @@ -1,108 +0,0 @@ -package retrievaladapter - -import ( - "context" - - "github.com/hashicorp/go-multierror" - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/boost-gfm/retrievalmarket" - "github.com/filecoin-project/boost-gfm/shared" - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych" - - "github.com/filecoin-project/lotus/api/v1api" - "github.com/filecoin-project/lotus/chain/types" -) - -var log = logging.Logger("retrievaladapter") - -type retrievalProviderNode struct { - full v1api.FullNode -} - -var _ retrievalmarket.RetrievalProviderNode = 
(*retrievalProviderNode)(nil) - -// NewRetrievalProviderNode returns a new node adapter for a retrieval provider that talks to the -// Lotus Node -func NewRetrievalProviderNode(full v1api.FullNode) retrievalmarket.RetrievalProviderNode { - return &retrievalProviderNode{full: full} -} - -func (rpn *retrievalProviderNode) GetMinerWorkerAddress(ctx context.Context, miner address.Address, tok shared.TipSetToken) (address.Address, error) { - tsk, err := types.TipSetKeyFromBytes(tok) - if err != nil { - return address.Undef, err - } - - mi, err := rpn.full.StateMinerInfo(ctx, miner, tsk) - return mi.Worker, err -} - -func (rpn *retrievalProviderNode) SavePaymentVoucher(ctx context.Context, paymentChannel address.Address, voucher *paychtypes.SignedVoucher, proof []byte, expectedAmount abi.TokenAmount, tok shared.TipSetToken) (abi.TokenAmount, error) { - // TODO: respect the provided TipSetToken (a serialized TipSetKey) when - // querying the chain - added, err := rpn.full.PaychVoucherAdd(ctx, paymentChannel, voucher, proof, expectedAmount) - return added, err -} - -func (rpn *retrievalProviderNode) GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) { - head, err := rpn.full.ChainHead(ctx) - if err != nil { - return nil, 0, err - } - - return head.Key().Bytes(), head.Height(), nil -} - -// GetRetrievalPricingInput takes a set of candidate storage deals that can serve a retrieval request, -// and returns an minimally populated PricingInput. This PricingInput should be enhanced -// with more data, and passed to the pricing function to determine the final quoted price. -func (rpn *retrievalProviderNode) GetRetrievalPricingInput(ctx context.Context, pieceCID cid.Cid, storageDeals []abi.DealID) (retrievalmarket.PricingInput, error) { - resp := retrievalmarket.PricingInput{} - - head, err := rpn.full.ChainHead(ctx) - if err != nil { - return resp, xerrors.Errorf("failed to get chain head: %w", err) - } - tsk := head.Key() - - var mErr error - - for _, dealID := range storageDeals { - ds, err := rpn.full.StateMarketStorageDeal(ctx, dealID, tsk) - if err != nil { - log.Warnf("failed to look up deal %d on chain: err=%w", dealID, err) - mErr = multierror.Append(mErr, err) - continue - } - if ds.Proposal.VerifiedDeal { - resp.VerifiedDeal = true - } - - if ds.Proposal.PieceCID.Equals(pieceCID) { - resp.PieceSize = ds.Proposal.PieceSize.Unpadded() - } - - // If we've discovered a verified deal with the required PieceCID, we don't need - // to lookup more deals and we're done. - if resp.VerifiedDeal && resp.PieceSize != 0 { - break - } - } - - // Note: The piece size can never actually be zero. We only use it to here - // to assert that we didn't find a matching piece. 
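For reference, the aggregation rule implemented by GetRetrievalPricingInput can be summarized in isolation. The sketch below is illustrative only and not part of this patch; dealSummary and pricingInputFrom are hypothetical names, and plain Go types stand in for the abi/lotus types used above.

package main

import (
	"errors"
	"fmt"
)

// dealSummary is a hypothetical, chain-free stand-in for the fields that
// GetRetrievalPricingInput reads from each on-chain storage deal.
type dealSummary struct {
	matchesPiece bool   // the deal's PieceCID equals the requested piece
	unpaddedSize uint64 // unpadded piece size recorded in the deal proposal
	verified     bool   // the deal is a verified (Fil+) deal
}

// pricingInputFrom mirrors the loop above: verified is OR-ed across all
// deals, the size is taken from any deal that references the requested
// piece, and a zero size after the loop means no deal matched the piece.
func pricingInputFrom(deals []dealSummary) (size uint64, verified bool, err error) {
	for _, d := range deals {
		if d.verified {
			verified = true
		}
		if d.matchesPiece {
			size = d.unpaddedSize
		}
		// Once we have a verified deal and a matching piece size there is
		// nothing more to learn from the remaining deals.
		if verified && size != 0 {
			break
		}
	}
	if size == 0 {
		return 0, false, errors.New("failed to find matching piece")
	}
	return size, verified, nil
}

func main() {
	size, verified, err := pricingInputFrom([]dealSummary{
		{matchesPiece: true, unpaddedSize: 127},
		{verified: true},
	})
	fmt.Println(size, verified, err) // 127 true <nil>
}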
- if resp.PieceSize == 0 { - if mErr == nil { - return resp, xerrors.New("failed to find matching piece") - } - - return resp, xerrors.Errorf("failed to fetch storage deal state: %w", mErr) - } - - return resp, nil -} diff --git a/markets/retrievaladapter/provider_test.go b/markets/retrievaladapter/provider_test.go deleted file mode 100644 index 71cbcee9f..000000000 --- a/markets/retrievaladapter/provider_test.go +++ /dev/null @@ -1,206 +0,0 @@ -// stm: #unit -package retrievaladapter - -import ( - "context" - "testing" - - "github.com/golang/mock/gomock" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - "golang.org/x/xerrors" - - "github.com/filecoin-project/boost-gfm/retrievalmarket" - testnet "github.com/filecoin-project/boost-gfm/shared_testutil" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/mocks" - "github.com/filecoin-project/lotus/chain/actors/builtin/market" - "github.com/filecoin-project/lotus/chain/types" -) - -func TestGetPricingInput(t *testing.T) { - //stm: @CHAIN_STATE_MARKET_STORAGE_DEAL_001 - ctx := context.Background() - tsk := &types.TipSet{} - key := tsk.Key() - - pcid := testnet.GenerateCids(1)[0] - deals := []abi.DealID{1, 2} - paddedSize := abi.PaddedPieceSize(128) - unpaddedSize := paddedSize.Unpadded() - - tcs := map[string]struct { - pieceCid cid.Cid - deals []abi.DealID - fFnc func(node *mocks.MockFullNode) - - expectedErrorStr string - expectedVerified bool - expectedPieceSize abi.UnpaddedPieceSize - }{ - "error when fails to fetch chain head": { - fFnc: func(n *mocks.MockFullNode) { - n.EXPECT().ChainHead(gomock.Any()).Return(tsk, xerrors.New("chain head error")).Times(1) - }, - expectedErrorStr: "chain head error", - }, - - "error when no piece matches": { - fFnc: func(n *mocks.MockFullNode) { - out1 := &api.MarketDeal{ - Proposal: market.DealProposal{ - PieceCID: testnet.GenerateCids(1)[0], - }, - } - out2 := &api.MarketDeal{ - Proposal: market.DealProposal{ - PieceCID: testnet.GenerateCids(1)[0], - }, - } - - n.EXPECT().ChainHead(gomock.Any()).Return(tsk, nil).Times(1) - gomock.InOrder( - n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[0], key).Return(out1, nil), - n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[1], key).Return(out2, nil), - ) - - }, - expectedErrorStr: "failed to find matching piece", - }, - - "error when fails to fetch deal state": { - fFnc: func(n *mocks.MockFullNode) { - out1 := &api.MarketDeal{ - Proposal: market.DealProposal{ - PieceCID: pcid, - PieceSize: paddedSize, - }, - } - out2 := &api.MarketDeal{ - Proposal: market.DealProposal{ - PieceCID: testnet.GenerateCids(1)[0], - VerifiedDeal: true, - }, - } - - n.EXPECT().ChainHead(gomock.Any()).Return(tsk, nil).Times(1) - gomock.InOrder( - n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[0], key).Return(out1, xerrors.New("error 1")), - n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[1], key).Return(out2, xerrors.New("error 2")), - ) - - }, - expectedErrorStr: "failed to fetch storage deal state", - }, - - "verified is true even if one deal is verified and we get the correct piecesize": { - fFnc: func(n *mocks.MockFullNode) { - out1 := &api.MarketDeal{ - Proposal: market.DealProposal{ - PieceCID: pcid, - PieceSize: paddedSize, - }, - } - out2 := &api.MarketDeal{ - Proposal: market.DealProposal{ - PieceCID: testnet.GenerateCids(1)[0], - VerifiedDeal: true, - }, - } - - n.EXPECT().ChainHead(gomock.Any()).Return(tsk, nil).Times(1) - gomock.InOrder( - 
n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[0], key).Return(out1, nil), - n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[1], key).Return(out2, nil), - ) - - }, - expectedPieceSize: unpaddedSize, - expectedVerified: true, - }, - - "success even if one deal state fetch errors out but the other deal is verified and has the required piececid": { - fFnc: func(n *mocks.MockFullNode) { - out1 := &api.MarketDeal{ - Proposal: market.DealProposal{ - PieceCID: testnet.GenerateCids(1)[0], - }, - } - out2 := &api.MarketDeal{ - Proposal: market.DealProposal{ - PieceCID: pcid, - PieceSize: paddedSize, - VerifiedDeal: true, - }, - } - - n.EXPECT().ChainHead(gomock.Any()).Return(tsk, nil).Times(1) - gomock.InOrder( - n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[0], key).Return(out1, xerrors.New("some error")), - n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[1], key).Return(out2, nil), - ) - - }, - expectedPieceSize: unpaddedSize, - expectedVerified: true, - }, - - "verified is false if both deals are unverified and we get the correct piece size": { - fFnc: func(n *mocks.MockFullNode) { - out1 := &api.MarketDeal{ - Proposal: market.DealProposal{ - PieceCID: pcid, - PieceSize: paddedSize, - VerifiedDeal: false, - }, - } - out2 := &api.MarketDeal{ - Proposal: market.DealProposal{ - PieceCID: testnet.GenerateCids(1)[0], - VerifiedDeal: false, - }, - } - - n.EXPECT().ChainHead(gomock.Any()).Return(tsk, nil).Times(1) - gomock.InOrder( - n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[0], key).Return(out1, nil), - n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[1], key).Return(out2, nil), - ) - - }, - expectedPieceSize: unpaddedSize, - expectedVerified: false, - }, - } - - for name, tc := range tcs { - tc := tc - t.Run(name, func(t *testing.T) { - mockCtrl := gomock.NewController(t) - // when test is done, assert expectations on all mock objects. 
- defer mockCtrl.Finish() - - mockFull := mocks.NewMockFullNode(mockCtrl) - rpn := &retrievalProviderNode{ - full: mockFull, - } - if tc.fFnc != nil { - tc.fFnc(mockFull) - } - - resp, err := rpn.GetRetrievalPricingInput(ctx, pcid, deals) - - if tc.expectedErrorStr != "" { - require.Error(t, err) - require.Contains(t, err.Error(), tc.expectedErrorStr) - require.Equal(t, retrievalmarket.PricingInput{}, resp) - } else { - require.NoError(t, err) - require.Equal(t, tc.expectedPieceSize, resp.PieceSize) - require.Equal(t, tc.expectedVerified, resp.VerifiedDeal) - } - }) - } -} diff --git a/markets/sectoraccessor/sectoraccessor.go b/markets/sectoraccessor/sectoraccessor.go index 307136c0c..9b709d3b5 100644 --- a/markets/sectoraccessor/sectoraccessor.go +++ b/markets/sectoraccessor/sectoraccessor.go @@ -8,9 +8,9 @@ import ( logging "github.com/ipfs/go-log/v2" "golang.org/x/xerrors" - "github.com/filecoin-project/boost-gfm/retrievalmarket" "github.com/filecoin-project/dagstore/mount" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api" diff --git a/markets/shared/ready.go b/markets/shared/ready.go new file mode 100644 index 000000000..9b85126bb --- /dev/null +++ b/markets/shared/ready.go @@ -0,0 +1,104 @@ +package shared + +import ( + "context" + "errors" + "sync" + + "github.com/hannahhoward/go-pubsub" +) + +// ReadyFunc is function that gets called once when an event is ready +type ReadyFunc func(error) + +// ReadyDispatcher is just an pubsub dispatcher where the callback is ReadyFunc +func ReadyDispatcher(evt pubsub.Event, fn pubsub.SubscriberFn) error { + migrateErr, ok := evt.(error) + if !ok && evt != nil { + return errors.New("wrong type of event") + } + cb, ok := fn.(ReadyFunc) + if !ok { + return errors.New("wrong type of event") + } + cb(migrateErr) + return nil +} + +// ReadyManager managers listeners for a ready event +type ReadyManager struct { + ctx context.Context + Stop context.CancelFunc + + lk sync.RWMutex + isReady bool + initErr error + pubsub *pubsub.PubSub +} + +func NewReadyManager() *ReadyManager { + ctx, stop := context.WithCancel(context.Background()) + return &ReadyManager{ + ctx: ctx, + Stop: stop, + pubsub: pubsub.New(ReadyDispatcher), + } +} + +// FireReady is called when the ready event occurs +func (m *ReadyManager) FireReady(err error) error { + m.lk.Lock() + defer m.lk.Unlock() + + if m.isReady { + return nil + } + + m.isReady = true + m.initErr = err + return m.pubsub.Publish(err) +} + +// OnReady registers a listener for the ready event. +// If the event has already been fired, the callback is immediately called back +// (in a go-routine). +func (m *ReadyManager) OnReady(ready ReadyFunc) { + m.lk.Lock() + defer m.lk.Unlock() + + if m.isReady { + initErr := m.initErr + go ready(initErr) + return + } + + m.pubsub.Subscribe(ready) +} + +// AwaitReady blocks until the ready event fires. +// Returns immediately if the event already fired. 
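For reference, a minimal usage sketch of the ReadyManager added above. This is illustrative only and not part of the patch; fakeInit is a hypothetical stand-in for whatever start-up work gates readiness.

package main

import (
	"fmt"

	"github.com/filecoin-project/boost/markets/shared"
)

// fakeInit stands in for real start-up work (migrations, reconnects, ...).
func fakeInit() error { return nil }

func main() {
	rm := shared.NewReadyManager()
	defer rm.Stop()

	// Publish the start-up result exactly once; repeated calls are no-ops.
	go func() { _ = rm.FireReady(fakeInit()) }()

	// AwaitReady blocks until FireReady has run (or Stop cancels the wait)
	// and returns the error that was passed to FireReady.
	if err := rm.AwaitReady(); err != nil {
		fmt.Println("initialization failed:", err)
		return
	}

	// Listeners that subscribe after the event has fired are called back
	// immediately in a new goroutine.
	done := make(chan error, 1)
	rm.OnReady(func(err error) { done <- err })
	fmt.Println("ready, err =", <-done)
}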
+func (m *ReadyManager) AwaitReady() error { + m.lk.RLock() + isReady := m.isReady + m.lk.RUnlock() + + if isReady { + return m.initErr + } + + errch := make(chan error) + m.OnReady(func(err error) { + select { + case <-m.ctx.Done(): + errch <- m.ctx.Err() + case errch <- err: + } + }) + + select { + case <-m.ctx.Done(): + return m.ctx.Err() + case err := <-errch: + return err + } +} diff --git a/markets/shared/retrystream.go b/markets/shared/retrystream.go new file mode 100644 index 000000000..34a75d08b --- /dev/null +++ b/markets/shared/retrystream.go @@ -0,0 +1,103 @@ +package shared + +import ( + "context" + "time" + + logging "github.com/ipfs/go-log/v2" + "github.com/jpillora/backoff" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + "golang.org/x/xerrors" +) + +var log = logging.Logger("data_transfer_network") + +// The max number of attempts to open a stream +const defaultMaxStreamOpenAttempts = 5 + +// The min backoff time between retries +const defaultMinAttemptDuration = 1 * time.Second + +// The max backoff time between retries +const defaultMaxAttemptDuration = 5 * time.Minute + +// The multiplier in the backoff time for each retry +const defaultBackoffFactor = 5 + +type StreamOpener interface { + NewStream(ctx context.Context, p peer.ID, pids ...protocol.ID) (network.Stream, error) +} + +type RetryStreamOption func(*RetryStream) + +// RetryParameters changes the default parameters around connection reopening +func RetryParameters(minDuration time.Duration, maxDuration time.Duration, attempts float64, backoffFactor float64) RetryStreamOption { + return func(impl *RetryStream) { + impl.maxStreamOpenAttempts = attempts + impl.minAttemptDuration = minDuration + impl.maxAttemptDuration = maxDuration + } +} + +type RetryStream struct { + opener StreamOpener + + backoffFactor float64 + maxStreamOpenAttempts float64 + minAttemptDuration time.Duration + maxAttemptDuration time.Duration +} + +func NewRetryStream(opener StreamOpener, options ...RetryStreamOption) *RetryStream { + impl := &RetryStream{ + opener: opener, + backoffFactor: defaultBackoffFactor, + maxStreamOpenAttempts: defaultMaxStreamOpenAttempts, + minAttemptDuration: defaultMinAttemptDuration, + maxAttemptDuration: defaultMaxAttemptDuration, + } + impl.SetOptions(options...) + return impl +} + +func (impl *RetryStream) SetOptions(options ...RetryStreamOption) { + for _, option := range options { + option(impl) + } +} + +func (impl *RetryStream) OpenStream(ctx context.Context, id peer.ID, protocols []protocol.ID) (network.Stream, error) { + b := &backoff.Backoff{ + Min: impl.minAttemptDuration, + Max: impl.maxAttemptDuration, + Factor: impl.maxStreamOpenAttempts, + Jitter: true, + } + + for { + s, err := impl.opener.NewStream(ctx, id, protocols...) 
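// Illustrative note, not part of this patch: with the defaults declared
// above (minAttemptDuration=1s, maxAttemptDuration=5m,
// maxStreamOpenAttempts=5) and the backoff Factor taken from
// maxStreamOpenAttempts, a peer that keeps failing is retried after
// roughly 1s, 5s, 25s and ~2m (jitter randomizes each wait down toward
// the minimum), and the fifth consecutive failure returns the error to
// the caller, so OpenStream gives up after a few minutes at most.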
+ if err == nil { + return s, err + } + + // b.Attempt() starts from zero + nAttempts := b.Attempt() + 1 + if nAttempts >= impl.maxStreamOpenAttempts { + return nil, xerrors.Errorf("exhausted %d attempts but failed to open stream, err: %w", int(impl.maxStreamOpenAttempts), err) + } + + duration := b.Duration() + log.Warnf("failed to open stream to %s on attempt %.0f of %.0f, waiting %s to try again, err: %s", + id, nAttempts, impl.maxStreamOpenAttempts, duration, err) + + ebt := time.NewTimer(duration) + select { + case <-ctx.Done(): + ebt.Stop() + return nil, xerrors.Errorf("open stream to %s canceled by context", id) + case <-ebt.C: + } + } +} diff --git a/markets/shared/shared.go b/markets/shared/shared.go new file mode 100644 index 000000000..7f752a236 --- /dev/null +++ b/markets/shared/shared.go @@ -0,0 +1,7 @@ +package shared + +// TipSetToken is the implementation-nonspecific identity for a tipset. +type TipSetToken []byte + +// Unsubscribe is a function that gets called to unsubscribe from (storage|retrieval)market events +type Unsubscribe func() diff --git a/markets/shared/timecounter.go b/markets/shared/timecounter.go new file mode 100644 index 000000000..68ca73bc0 --- /dev/null +++ b/markets/shared/timecounter.go @@ -0,0 +1,21 @@ +package shared + +import ( + "sync/atomic" + "time" +) + +// TimeCounter is used to generate a monotonically increasing sequence. +// It starts at the current time, then increments on each call to next. +type TimeCounter struct { + counter uint64 +} + +func NewTimeCounter() *TimeCounter { + return &TimeCounter{counter: uint64(time.Now().UnixNano())} +} + +func (tc *TimeCounter) Next() uint64 { + counter := atomic.AddUint64(&tc.counter, 1) + return counter +} diff --git a/markets/storageadapter/client.go b/markets/storageadapter/client.go deleted file mode 100644 index cacc40cb7..000000000 --- a/markets/storageadapter/client.go +++ /dev/null @@ -1,446 +0,0 @@ -package storageadapter - -// this file implements storagemarket.StorageClientNode - -import ( - "bytes" - "context" - - "github.com/ipfs/go-cid" - "go.uber.org/fx" - "golang.org/x/xerrors" - - "github.com/filecoin-project/boost-gfm/shared" - "github.com/filecoin-project/boost-gfm/storagemarket" - "github.com/filecoin-project/go-address" - cborutil "github.com/filecoin-project/go-cbor-util" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market" - "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/go-state-types/exitcode" - builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" - - "github.com/filecoin-project/boost/markets/utils" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - marketactor "github.com/filecoin-project/lotus/chain/actors/builtin/market" - "github.com/filecoin-project/lotus/chain/events" - "github.com/filecoin-project/lotus/chain/events/state" - "github.com/filecoin-project/lotus/chain/market" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/sigs" - "github.com/filecoin-project/lotus/node/impl/full" - "github.com/filecoin-project/lotus/node/modules/helpers" -) - -type ClientNodeAdapter struct { - *clientApi - - fundmgr *market.FundManager - ev *events.Events - dsMatcher *dealStateMatcher - scMgr *SectorCommittedManager -} - -type clientApi struct { - full.ChainAPI - full.StateAPI - full.MpoolAPI -} - -func NewClientNodeAdapter(mctx 
helpers.MetricsCtx, lc fx.Lifecycle, stateapi full.StateAPI, chain full.ChainAPI, mpool full.MpoolAPI, fundmgr *market.FundManager) (storagemarket.StorageClientNode, error) { - capi := &clientApi{chain, stateapi, mpool} - ctx := helpers.LifecycleCtx(mctx, lc) - - ev, err := events.NewEvents(ctx, capi) - if err != nil { - return nil, err - } - a := &ClientNodeAdapter{ - clientApi: capi, - - fundmgr: fundmgr, - ev: ev, - dsMatcher: newDealStateMatcher(state.NewStatePredicates(state.WrapFastAPI(capi))), - } - a.scMgr = NewSectorCommittedManager(ev, a, &apiWrapper{api: capi}) - return a, nil -} - -func (c *ClientNodeAdapter) ListStorageProviders(ctx context.Context, encodedTs shared.TipSetToken) ([]*storagemarket.StorageProviderInfo, error) { - tsk, err := types.TipSetKeyFromBytes(encodedTs) - if err != nil { - return nil, err - } - - addresses, err := c.StateListMiners(ctx, tsk) - if err != nil { - return nil, err - } - - var out []*storagemarket.StorageProviderInfo - - for _, addr := range addresses { - mi, err := c.GetMinerInfo(ctx, addr, encodedTs) - if err != nil { - return nil, err - } - - out = append(out, mi) - } - - return out, nil -} - -func (c *ClientNodeAdapter) VerifySignature(ctx context.Context, sig crypto.Signature, addr address.Address, input []byte, encodedTs shared.TipSetToken) (bool, error) { - addr, err := c.StateAccountKey(ctx, addr, types.EmptyTSK) - if err != nil { - return false, err - } - - err = sigs.Verify(&sig, addr, input) - return err == nil, err -} - -// Adds funds with the StorageMinerActor for a storage participant. Used by both providers and clients. -func (c *ClientNodeAdapter) AddFunds(ctx context.Context, addr address.Address, amount abi.TokenAmount) (cid.Cid, error) { - // (Provider Node API) - smsg, err := c.MpoolPushMessage(ctx, &types.Message{ - To: marketactor.Address, - From: addr, - Value: amount, - Method: builtin6.MethodsMarket.AddBalance, - }, nil) - if err != nil { - return cid.Undef, err - } - - return smsg.Cid(), nil -} - -func (c *ClientNodeAdapter) ReserveFunds(ctx context.Context, wallet, addr address.Address, amt abi.TokenAmount) (cid.Cid, error) { - return c.fundmgr.Reserve(ctx, wallet, addr, amt) -} - -func (c *ClientNodeAdapter) ReleaseFunds(ctx context.Context, addr address.Address, amt abi.TokenAmount) error { - return c.fundmgr.Release(addr, amt) -} - -func (c *ClientNodeAdapter) GetBalance(ctx context.Context, addr address.Address, encodedTs shared.TipSetToken) (storagemarket.Balance, error) { - tsk, err := types.TipSetKeyFromBytes(encodedTs) - if err != nil { - return storagemarket.Balance{}, err - } - - bal, err := c.StateMarketBalance(ctx, addr, tsk) - if err != nil { - return storagemarket.Balance{}, err - } - - return utils.ToSharedBalance(bal), nil -} - -// ValidatePublishedDeal validates that the provided deal has appeared on chain and references the same ClientDeal -// returns the Deal id if there is no error -// TODO: Don't return deal ID -func (c *ClientNodeAdapter) ValidatePublishedDeal(ctx context.Context, deal storagemarket.ClientDeal) (abi.DealID, error) { - log.Infow("DEAL ACCEPTED!") - - pubmsg, err := c.ChainGetMessage(ctx, *deal.PublishMessage) - if err != nil { - return 0, xerrors.Errorf("getting deal publish message: %w", err) - } - - mi, err := c.StateMinerInfo(ctx, deal.Proposal.Provider, types.EmptyTSK) - if err != nil { - return 0, xerrors.Errorf("getting miner worker failed: %w", err) - } - - fromid, err := c.StateLookupID(ctx, pubmsg.From, types.EmptyTSK) - if err != nil { - return 0, xerrors.Errorf("failed 
to resolve from msg ID addr: %w", err) - } - - var pubOk bool - pubAddrs := append([]address.Address{mi.Worker, mi.Owner}, mi.ControlAddresses...) - for _, a := range pubAddrs { - if fromid == a { - pubOk = true - break - } - } - - if !pubOk { - return 0, xerrors.Errorf("deal wasn't published by storage provider: from=%s, provider=%s,%+v", pubmsg.From, deal.Proposal.Provider, pubAddrs) - } - - if pubmsg.To != marketactor.Address { - return 0, xerrors.Errorf("deal publish message wasn't set to StorageMarket actor (to=%s)", pubmsg.To) - } - - if pubmsg.Method != builtin6.MethodsMarket.PublishStorageDeals { - return 0, xerrors.Errorf("deal publish message called incorrect method (method=%s)", pubmsg.Method) - } - - var params markettypes.PublishStorageDealsParams - if err := params.UnmarshalCBOR(bytes.NewReader(pubmsg.Params)); err != nil { - return 0, err - } - - dealIdx := -1 - for i, storageDeal := range params.Deals { - // TODO: make it less hacky - sd := storageDeal - eq, err := cborutil.Equals(&deal.ClientDealProposal, &sd) - if err != nil { - return 0, err - } - if eq { - dealIdx = i - break - } - } - - if dealIdx == -1 { - return 0, xerrors.Errorf("deal publish didn't contain our deal (message cid: %s)", deal.PublishMessage) - } - - // TODO: timeout - ret, err := c.StateWaitMsg(ctx, *deal.PublishMessage, build.MessageConfidence, api.LookbackNoLimit, true) - if err != nil { - return 0, xerrors.Errorf("waiting for deal publish message: %w", err) - } - if ret.Receipt.ExitCode != 0 { - return 0, xerrors.Errorf("deal publish failed: exit=%d", ret.Receipt.ExitCode) - } - - nv, err := c.StateNetworkVersion(ctx, ret.TipSet) - if err != nil { - return 0, xerrors.Errorf("getting network version: %w", err) - } - - res, err := marketactor.DecodePublishStorageDealsReturn(ret.Receipt.Return, nv) - if err != nil { - return 0, xerrors.Errorf("decoding deal publish return: %w", err) - } - - dealIDs, err := res.DealIDs() - if err != nil { - return 0, xerrors.Errorf("getting dealIDs: %w", err) - } - - if dealIdx >= len(params.Deals) { - return 0, xerrors.Errorf( - "deal index %d out of bounds of deals (len %d) in publish deals message %s", - dealIdx, len(params.Deals), pubmsg.Cid()) - } - - valid, outIdx, err := res.IsDealValid(uint64(dealIdx)) - if err != nil { - return 0, xerrors.Errorf("determining deal validity: %w", err) - } - - if !valid { - return 0, xerrors.New("deal was invalid at publication") - } - - return dealIDs[outIdx], nil -} - -var clientOverestimation = struct { - numerator int64 - denominator int64 -}{ - numerator: 12, - denominator: 10, -} - -func (c *ClientNodeAdapter) DealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, isVerified bool) (abi.TokenAmount, abi.TokenAmount, error) { - bounds, err := c.StateDealProviderCollateralBounds(ctx, size, isVerified, types.EmptyTSK) - if err != nil { - return abi.TokenAmount{}, abi.TokenAmount{}, err - } - - min := big.Mul(bounds.Min, big.NewInt(clientOverestimation.numerator)) - min = big.Div(min, big.NewInt(clientOverestimation.denominator)) - return min, bounds.Max, nil -} - -// TODO: Remove dealID parameter, change publishCid to be cid.Cid (instead of pointer) -func (c *ClientNodeAdapter) OnDealSectorPreCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, proposal markettypes.DealProposal, publishCid *cid.Cid, cb storagemarket.DealSectorPreCommittedCallback) error { - return c.scMgr.OnDealSectorPreCommitted(ctx, provider, proposal, *publishCid, cb) -} - -// TODO: Remove dealID parameter, change 
publishCid to be cid.Cid (instead of pointer) -func (c *ClientNodeAdapter) OnDealSectorCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, sectorNumber abi.SectorNumber, proposal markettypes.DealProposal, publishCid *cid.Cid, cb storagemarket.DealSectorCommittedCallback) error { - return c.scMgr.OnDealSectorCommitted(ctx, provider, sectorNumber, proposal, *publishCid, cb) -} - -// TODO: Replace dealID parameter with DealProposal -func (c *ClientNodeAdapter) OnDealExpiredOrSlashed(ctx context.Context, dealID abi.DealID, onDealExpired storagemarket.DealExpiredCallback, onDealSlashed storagemarket.DealSlashedCallback) error { - head, err := c.ChainHead(ctx) - if err != nil { - return xerrors.Errorf("client: failed to get chain head: %w", err) - } - - sd, err := c.StateMarketStorageDeal(ctx, dealID, head.Key()) - if err != nil { - return xerrors.Errorf("client: failed to look up deal %d on chain: %w", dealID, err) - } - - // Called immediately to check if the deal has already expired or been slashed - checkFunc := func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) { - if ts == nil { - // keep listening for events - return false, true, nil - } - - // Check if the deal has already expired - if sd.Proposal.EndEpoch <= ts.Height() { - onDealExpired(nil) - return true, false, nil - } - - // If there is no deal assume it's already been slashed - if sd.State.SectorStartEpoch < 0 { - onDealSlashed(ts.Height(), nil) - return true, false, nil - } - - // No events have occurred yet, so return - // done: false, more: true (keep listening for events) - return false, true, nil - } - - // Called when there was a match against the state change we're looking for - // and the chain has advanced to the confidence height - stateChanged := func(ts *types.TipSet, ts2 *types.TipSet, states events.StateChange, h abi.ChainEpoch) (more bool, err error) { - // Check if the deal has already expired - if ts2 == nil || sd.Proposal.EndEpoch <= ts2.Height() { - onDealExpired(nil) - return false, nil - } - - // Timeout waiting for state change - if states == nil { - log.Error("timed out waiting for deal expiry") - return false, nil - } - - changedDeals, ok := states.(state.ChangedDeals) - if !ok { - panic("Expected state.ChangedDeals") - } - - deal, ok := changedDeals[dealID] - if !ok { - // No change to deal - return true, nil - } - - // Deal was slashed - if deal.To == nil { - onDealSlashed(ts2.Height(), nil) - return false, nil - } - - return true, nil - } - - // Called when there was a chain reorg and the state change was reverted - revert := func(ctx context.Context, ts *types.TipSet) error { - // TODO: Is it ok to just ignore this? 
- log.Warn("deal state reverted; TODO: actually handle this!") - return nil - } - - // Watch for state changes to the deal - match := c.dsMatcher.matcher(ctx, dealID) - - // Wait until after the end epoch for the deal and then timeout - timeout := (sd.Proposal.EndEpoch - head.Height()) + 1 - if err := c.ev.StateChanged(checkFunc, stateChanged, revert, int(build.MessageConfidence)+1, timeout, match); err != nil { - return xerrors.Errorf("failed to set up state changed handler: %w", err) - } - - return nil -} - -func (c *ClientNodeAdapter) SignProposal(ctx context.Context, signer address.Address, proposal markettypes.DealProposal) (*markettypes.ClientDealProposal, error) { - // TODO: output spec signed proposal - buf, err := cborutil.Dump(&proposal) - if err != nil { - return nil, err - } - - signer, err = c.StateAccountKey(ctx, signer, types.EmptyTSK) - if err != nil { - return nil, err - } - - sig, err := c.Wallet.WalletSign(ctx, signer, buf, api.MsgMeta{ - Type: api.MTDealProposal, - }) - if err != nil { - return nil, err - } - - return &markettypes.ClientDealProposal{ - Proposal: proposal, - ClientSignature: *sig, - }, nil -} - -func (c *ClientNodeAdapter) GetDefaultWalletAddress(ctx context.Context) (address.Address, error) { - addr, err := c.DefWallet.GetDefault() - return addr, err -} - -func (c *ClientNodeAdapter) GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) { - head, err := c.ChainHead(ctx) - if err != nil { - return nil, 0, err - } - - return head.Key().Bytes(), head.Height(), nil -} - -func (c *ClientNodeAdapter) WaitForMessage(ctx context.Context, mcid cid.Cid, cb func(code exitcode.ExitCode, bytes []byte, finalCid cid.Cid, err error) error) error { - receipt, err := c.StateWaitMsg(ctx, mcid, build.MessageConfidence, api.LookbackNoLimit, true) - if err != nil { - return cb(0, nil, cid.Undef, err) - } - return cb(receipt.Receipt.ExitCode, receipt.Receipt.Return, receipt.Message, nil) -} - -func (c *ClientNodeAdapter) GetMinerInfo(ctx context.Context, addr address.Address, encodedTs shared.TipSetToken) (*storagemarket.StorageProviderInfo, error) { - tsk, err := types.TipSetKeyFromBytes(encodedTs) - if err != nil { - return nil, err - } - mi, err := c.StateMinerInfo(ctx, addr, tsk) - if err != nil { - return nil, err - } - - out := utils.NewStorageProviderInfo(addr, mi.Worker, mi.SectorSize, *mi.PeerId, mi.Multiaddrs) - return &out, nil -} - -func (c *ClientNodeAdapter) SignBytes(ctx context.Context, signer address.Address, b []byte) (*crypto.Signature, error) { - signer, err := c.StateAccountKey(ctx, signer, types.EmptyTSK) - if err != nil { - return nil, err - } - - localSignature, err := c.Wallet.WalletSign(ctx, signer, b, api.MsgMeta{ - Type: api.MTUnknown, // TODO: pass type here - }) - if err != nil { - return nil, err - } - return localSignature, nil -} - -var _ storagemarket.StorageClientNode = &ClientNodeAdapter{} diff --git a/markets/storageadapter/client_blockstore.go b/markets/storageadapter/client_blockstore.go deleted file mode 100644 index 867e64493..000000000 --- a/markets/storageadapter/client_blockstore.go +++ /dev/null @@ -1,102 +0,0 @@ -package storageadapter - -import ( - "sync" - - blockstore "github.com/ipfs/boxo/blockstore" - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - "github.com/filecoin-project/boost-gfm/storagemarket" - "github.com/filecoin-project/boost-gfm/stores" - - "github.com/filecoin-project/lotus/node/repo/imports" -) - -// ProxyBlockstoreAccessor is an accessor that returns a fixed blockstore. 
-// To be used in combination with IPFS integration. -type ProxyBlockstoreAccessor struct { - Blockstore blockstore.Blockstore -} - -var _ storagemarket.BlockstoreAccessor = (*ProxyBlockstoreAccessor)(nil) - -func NewFixedBlockstoreAccessor(bs blockstore.Blockstore) storagemarket.BlockstoreAccessor { - return &ProxyBlockstoreAccessor{Blockstore: bs} -} - -func (p *ProxyBlockstoreAccessor) Get(cid storagemarket.PayloadCID) (blockstore.Blockstore, error) { - return p.Blockstore, nil -} - -func (p *ProxyBlockstoreAccessor) Done(cid storagemarket.PayloadCID) error { - return nil -} - -// ImportsBlockstoreAccessor is a blockstore accessor backed by the -// imports.Manager. -type ImportsBlockstoreAccessor struct { - m *imports.Manager - lk sync.Mutex - open map[cid.Cid]struct { - st stores.ClosableBlockstore - refs int - } -} - -var _ storagemarket.BlockstoreAccessor = (*ImportsBlockstoreAccessor)(nil) - -func NewImportsBlockstoreAccessor(importmgr *imports.Manager) *ImportsBlockstoreAccessor { - return &ImportsBlockstoreAccessor{ - m: importmgr, - open: make(map[cid.Cid]struct { - st stores.ClosableBlockstore - refs int - }), - } -} - -func (s *ImportsBlockstoreAccessor) Get(payloadCID storagemarket.PayloadCID) (blockstore.Blockstore, error) { - s.lk.Lock() - defer s.lk.Unlock() - - e, ok := s.open[payloadCID] - if ok { - e.refs++ - return e.st, nil - } - - path, err := s.m.CARPathFor(payloadCID) - if err != nil { - return nil, xerrors.Errorf("failed to get client blockstore for root %s: %w", payloadCID, err) - } - if path == "" { - return nil, xerrors.Errorf("no client blockstore for root %s", payloadCID) - } - ret, err := stores.ReadOnlyFilestore(path) - if err != nil { - return nil, err - } - e.st = ret - s.open[payloadCID] = e - return ret, nil -} - -func (s *ImportsBlockstoreAccessor) Done(payloadCID storagemarket.PayloadCID) error { - s.lk.Lock() - defer s.lk.Unlock() - - e, ok := s.open[payloadCID] - if !ok { - return nil - } - - e.refs-- - if e.refs == 0 { - if err := e.st.Close(); err != nil { - log.Warnf("failed to close blockstore: %s", err) - } - delete(s.open, payloadCID) - } - return nil -} diff --git a/markets/storageadapter/dealpublisher.go b/markets/storageadapter/dealpublisher.go index 74d8a7d19..1b09c8391 100644 --- a/markets/storageadapter/dealpublisher.go +++ b/markets/storageadapter/dealpublisher.go @@ -9,6 +9,7 @@ import ( cborutil "github.com/filecoin-project/go-cbor-util" "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" "go.uber.org/fx" "golang.org/x/xerrors" @@ -27,6 +28,8 @@ import ( "github.com/filecoin-project/lotus/storage/ctladdr" ) +var log = logging.Logger("storageadapter") + type dealPublisherAPI interface { ChainHead(context.Context) (*types.TipSet, error) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) diff --git a/markets/storageadapter/dealstatematcher.go b/markets/storageadapter/dealstatematcher.go deleted file mode 100644 index 8d5598eae..000000000 --- a/markets/storageadapter/dealstatematcher.go +++ /dev/null @@ -1,85 +0,0 @@ -package storageadapter - -import ( - "context" - "sync" - - "github.com/filecoin-project/go-state-types/abi" - - actorsmarket "github.com/filecoin-project/lotus/chain/actors/builtin/market" - "github.com/filecoin-project/lotus/chain/events" - "github.com/filecoin-project/lotus/chain/events/state" - "github.com/filecoin-project/lotus/chain/types" -) - -// dealStateMatcher caches the DealStates for the most recent -// old/new tipset combination -type 
dealStateMatcher struct { - preds *state.StatePredicates - - lk sync.Mutex - oldTsk types.TipSetKey - newTsk types.TipSetKey - oldDealStateRoot actorsmarket.DealStates - newDealStateRoot actorsmarket.DealStates -} - -func newDealStateMatcher(preds *state.StatePredicates) *dealStateMatcher { - return &dealStateMatcher{preds: preds} -} - -// matcher returns a function that checks if the state of the given dealID -// has changed. -// It caches the DealStates for the most recent old/new tipset combination. -func (mc *dealStateMatcher) matcher(ctx context.Context, dealID abi.DealID) events.StateMatchFunc { - // The function that is called to check if the deal state has changed for - // the target deal ID - dealStateChangedForID := mc.preds.DealStateChangedForIDs([]abi.DealID{dealID}) - - // The match function is called by the events API to check if there's - // been a state change for the deal with the target deal ID - match := func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) { - mc.lk.Lock() - defer mc.lk.Unlock() - - // Check if we've already fetched the DealStates for the given tipsets - if mc.oldTsk == oldTs.Key() && mc.newTsk == newTs.Key() { - // If we fetch the DealStates and there is no difference between - // them, they are stored as nil. So we can just bail out. - if mc.oldDealStateRoot == nil || mc.newDealStateRoot == nil { - return false, nil, nil - } - - // Check if the deal state has changed for the target ID - return dealStateChangedForID(ctx, mc.oldDealStateRoot, mc.newDealStateRoot) - } - - // We haven't already fetched the DealStates for the given tipsets, so - // do so now - - // Replace dealStateChangedForID with a function that records the - // DealStates so that we can cache them - var oldDealStateRootSaved, newDealStateRootSaved actorsmarket.DealStates - recorder := func(ctx context.Context, oldDealStateRoot, newDealStateRoot actorsmarket.DealStates) (changed bool, user state.UserData, err error) { - // Record DealStates - oldDealStateRootSaved = oldDealStateRoot - newDealStateRootSaved = newDealStateRoot - - return dealStateChangedForID(ctx, oldDealStateRoot, newDealStateRoot) - } - - // Call the match function - dealDiff := mc.preds.OnStorageMarketActorChanged( - mc.preds.OnDealStateChanged(recorder)) - matched, data, err := dealDiff(ctx, oldTs.Key(), newTs.Key()) - - // Save the recorded DealStates for the tipsets - mc.oldTsk = oldTs.Key() - mc.newTsk = newTs.Key() - mc.oldDealStateRoot = oldDealStateRootSaved - mc.newDealStateRoot = newDealStateRootSaved - - return matched, data, err - } - return match -} diff --git a/markets/storageadapter/dealstatematcher_test.go b/markets/storageadapter/dealstatematcher_test.go deleted file mode 100644 index 9a46e4af9..000000000 --- a/markets/storageadapter/dealstatematcher_test.go +++ /dev/null @@ -1,155 +0,0 @@ -// stm: #unit -package storageadapter - -import ( - "context" - "testing" - - "github.com/ipfs/go-cid" - cbornode "github.com/ipfs/go-ipld-cbor" - "github.com/stretchr/testify/require" - "golang.org/x/sync/errgroup" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" - market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" - adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt" - - bstore "github.com/filecoin-project/lotus/blockstore" - "github.com/filecoin-project/lotus/chain/events" - "github.com/filecoin-project/lotus/chain/events/state" - test 
"github.com/filecoin-project/lotus/chain/events/state/mock" - "github.com/filecoin-project/lotus/chain/types" -) - -func TestDealStateMatcher(t *testing.T) { - //stm: @CHAIN_STATE_GET_ACTOR_001 - ctx := context.Background() - bs := bstore.NewMemorySync() - store := adt2.WrapStore(ctx, cbornode.NewCborStore(bs)) - - deal1 := &market2.DealState{ - SectorStartEpoch: 1, - LastUpdatedEpoch: 2, - } - deal2 := &market2.DealState{ - SectorStartEpoch: 4, - LastUpdatedEpoch: 5, - } - deal3 := &market2.DealState{ - SectorStartEpoch: 7, - LastUpdatedEpoch: 8, - } - deals1 := map[abi.DealID]*market2.DealState{ - abi.DealID(1): deal1, - } - deals2 := map[abi.DealID]*market2.DealState{ - abi.DealID(1): deal2, - } - deals3 := map[abi.DealID]*market2.DealState{ - abi.DealID(1): deal3, - } - - deal1StateC := createMarketState(ctx, t, store, deals1) - deal2StateC := createMarketState(ctx, t, store, deals2) - deal3StateC := createMarketState(ctx, t, store, deals3) - - minerAddr, err := address.NewFromString("t00") - require.NoError(t, err) - ts1, err := test.MockTipset(minerAddr, 1) - require.NoError(t, err) - ts2, err := test.MockTipset(minerAddr, 2) - require.NoError(t, err) - ts3, err := test.MockTipset(minerAddr, 3) - require.NoError(t, err) - - api := test.NewMockAPI(bs) - api.SetActor(ts1.Key(), &types.Actor{Code: builtin2.StorageMarketActorCodeID, Head: deal1StateC}) - api.SetActor(ts2.Key(), &types.Actor{Code: builtin2.StorageMarketActorCodeID, Head: deal2StateC}) - api.SetActor(ts3.Key(), &types.Actor{Code: builtin2.StorageMarketActorCodeID, Head: deal3StateC}) - - t.Run("caching", func(t *testing.T) { - dsm := newDealStateMatcher(state.NewStatePredicates(api)) - matcher := dsm.matcher(ctx, abi.DealID(1)) - - // Call matcher with tipsets that have the same state - ok, stateChange, err := matcher(ts1, ts1) - require.NoError(t, err) - require.False(t, ok) - require.Nil(t, stateChange) - // Should call StateGetActor once for each tipset - require.Equal(t, 2, api.StateGetActorCallCount()) - - // Call matcher with tipsets that have different state - api.ResetCallCounts() - ok, stateChange, err = matcher(ts1, ts2) - require.NoError(t, err) - require.True(t, ok) - require.NotNil(t, stateChange) - // Should call StateGetActor once for each tipset - require.Equal(t, 2, api.StateGetActorCallCount()) - - // Call matcher again with the same tipsets as above, should be cached - api.ResetCallCounts() - ok, stateChange, err = matcher(ts1, ts2) - require.NoError(t, err) - require.True(t, ok) - require.NotNil(t, stateChange) - // Should not call StateGetActor (because it should hit the cache) - require.Equal(t, 0, api.StateGetActorCallCount()) - - // Call matcher with different tipsets, should not be cached - api.ResetCallCounts() - ok, stateChange, err = matcher(ts2, ts3) - require.NoError(t, err) - require.True(t, ok) - require.NotNil(t, stateChange) - // Should call StateGetActor once for each tipset - require.Equal(t, 2, api.StateGetActorCallCount()) - }) - - t.Run("parallel", func(t *testing.T) { - api.ResetCallCounts() - dsm := newDealStateMatcher(state.NewStatePredicates(api)) - matcher := dsm.matcher(ctx, abi.DealID(1)) - - // Call matcher with lots of go-routines in parallel - var eg errgroup.Group - res := make([]struct { - ok bool - stateChange events.StateChange - }, 20) - for i := 0; i < len(res); i++ { - i := i - eg.Go(func() error { - ok, stateChange, err := matcher(ts1, ts2) - res[i].ok = ok - res[i].stateChange = stateChange - return err - }) - } - err := eg.Wait() - require.NoError(t, err) - - // 
All go-routines should have got the same (cached) result - for i := 1; i < len(res); i++ { - require.Equal(t, res[i].ok, res[i-1].ok) - require.Equal(t, res[i].stateChange, res[i-1].stateChange) - } - - // Only one go-routine should have called StateGetActor - // (once for each tipset) - require.Equal(t, 2, api.StateGetActorCallCount()) - }) -} - -func createMarketState(ctx context.Context, t *testing.T, store adt2.Store, deals map[abi.DealID]*market2.DealState) cid.Cid { - dealRootCid := test.CreateDealAMT(ctx, t, store, deals) - state := test.CreateEmptyMarketState(t, store) - state.States = dealRootCid - - stateC, err := store.Put(ctx, state) - require.NoError(t, err) - return stateC -} diff --git a/markets/storageadapter/ondealsectorcommitted.go b/markets/storageadapter/ondealsectorcommitted.go deleted file mode 100644 index e922da0cf..000000000 --- a/markets/storageadapter/ondealsectorcommitted.go +++ /dev/null @@ -1,399 +0,0 @@ -package storageadapter - -import ( - "bytes" - "context" - "sync" - - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - "github.com/filecoin-project/boost-gfm/storagemarket" - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v8/miner" - "github.com/filecoin-project/go-state-types/builtin/v9/market" - - "github.com/filecoin-project/lotus/build" - lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/events" - "github.com/filecoin-project/lotus/chain/types" - pipeline "github.com/filecoin-project/lotus/storage/pipeline" -) - -type eventsCalledAPI interface { - Called(ctx context.Context, check events.CheckFunc, msgHnd events.MsgHandler, rev events.RevertHandler, confidence int, timeout abi.ChainEpoch, mf events.MsgMatchFunc) error -} - -type dealInfoAPI interface { - GetCurrentDealInfo(ctx context.Context, tsk types.TipSetKey, proposal *market.DealProposal, publishCid cid.Cid) (pipeline.CurrentDealInfo, error) -} - -type diffPreCommitsAPI interface { - diffPreCommits(ctx context.Context, actor address.Address, pre, cur types.TipSetKey) (*lminer.PreCommitChanges, error) -} - -type SectorCommittedManager struct { - ev eventsCalledAPI - dealInfo dealInfoAPI - dpc diffPreCommitsAPI -} - -func NewSectorCommittedManager(ev eventsCalledAPI, tskAPI pipeline.CurrentDealInfoAPI, dpcAPI diffPreCommitsAPI) *SectorCommittedManager { - dim := &pipeline.CurrentDealInfoManager{ - CDAPI: tskAPI, - } - return newSectorCommittedManager(ev, dim, dpcAPI) -} - -func newSectorCommittedManager(ev eventsCalledAPI, dealInfo dealInfoAPI, dpcAPI diffPreCommitsAPI) *SectorCommittedManager { - return &SectorCommittedManager{ - ev: ev, - dealInfo: dealInfo, - dpc: dpcAPI, - } -} - -func (mgr *SectorCommittedManager) OnDealSectorPreCommitted(ctx context.Context, provider address.Address, proposal market.DealProposal, publishCid cid.Cid, callback storagemarket.DealSectorPreCommittedCallback) error { - // Ensure callback is only called once - var once sync.Once - cb := func(sectorNumber abi.SectorNumber, isActive bool, err error) { - once.Do(func() { - callback(sectorNumber, isActive, err) - }) - } - - // First check if the deal is already active, and if so, bail out - checkFunc := func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) { - dealInfo, isActive, err := mgr.checkIfDealAlreadyActive(ctx, ts, &proposal, 
publishCid) - if err != nil { - // Note: the error returned from here will end up being returned - // from OnDealSectorPreCommitted so no need to call the callback - // with the error - return false, false, xerrors.Errorf("failed to check deal activity: %w", err) - } - - if isActive { - // Deal is already active, bail out - cb(0, true, nil) - return true, false, nil - } - - // Check that precommits which landed between when the deal was published - // and now don't already contain the deal we care about. - // (this can happen when the precommit lands vary quickly (in tests), or - // when the client node was down after the deal was published, and when - // the precommit containing it landed on chain) - - diff, err := mgr.dpc.diffPreCommits(ctx, provider, dealInfo.PublishMsgTipSet, ts.Key()) - if err != nil { - return false, false, xerrors.Errorf("failed to diff precommits: %w", err) - } - - for _, info := range diff.Added { - for _, d := range info.Info.DealIDs { - if d == dealInfo.DealID { - cb(info.Info.SectorNumber, false, nil) - return true, false, nil - } - } - } - - // Not yet active, start matching against incoming messages - return false, true, nil - } - - // Watch for a pre-commit message to the provider. - matchEvent := func(msg *types.Message) (bool, error) { - matched := msg.To == provider && (msg.Method == builtin.MethodsMiner.PreCommitSector || msg.Method == builtin.MethodsMiner.PreCommitSectorBatch || msg.Method == builtin.MethodsMiner.ProveReplicaUpdates) - return matched, nil - } - - // The deal must be accepted by the deal proposal start epoch, so timeout - // if the chain reaches that epoch - timeoutEpoch := proposal.StartEpoch + 1 - - // Check if the message params included the deal ID we're looking for. - called := func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) { - defer func() { - if err != nil { - cb(0, false, xerrors.Errorf("handling applied event: %w", err)) - } - }() - - // If the deal hasn't been activated by the proposed start epoch, the - // deal will timeout (when msg == nil it means the timeout epoch was reached) - if msg == nil { - err = xerrors.Errorf("deal with piece CID %s was not activated by proposed deal start epoch %d", proposal.PieceCID, proposal.StartEpoch) - return false, err - } - - // Ignore the pre-commit message if it was not executed successfully - if rec.ExitCode != 0 { - return true, nil - } - - // When there is a reorg, the deal ID may change, so get the - // current deal ID from the publish message CID - res, err := mgr.dealInfo.GetCurrentDealInfo(ctx, ts.Key(), &proposal, publishCid) - if err != nil { - return false, xerrors.Errorf("failed to get dealinfo: %w", err) - } - - // If this is a replica update method that succeeded the deal is active - if msg.Method == builtin.MethodsMiner.ProveReplicaUpdates { - sn, err := dealSectorInReplicaUpdateSuccess(msg, rec, res) - if err != nil { - return false, err - } - if sn != nil { - cb(*sn, true, nil) - return false, nil - } - // Didn't find the deal ID in this message, so keep looking - return true, nil - } - - // Extract the message parameters - sn, err := dealSectorInPreCommitMsg(msg, res) - if err != nil { - return false, xerrors.Errorf("failed to extract message params: %w", err) - } - - if sn != nil { - cb(*sn, false, nil) - } - - // Didn't find the deal ID in this message, so keep looking - return true, nil - } - - revert := func(ctx context.Context, ts *types.TipSet) error { - log.Warn("deal pre-commit reverted; TODO: 
actually handle this!") - // TODO: Just go back to DealSealing? - return nil - } - - if err := mgr.ev.Called(ctx, checkFunc, called, revert, int(build.MessageConfidence+1), timeoutEpoch, matchEvent); err != nil { - return xerrors.Errorf("failed to set up called handler: %w", err) - } - - return nil -} - -func (mgr *SectorCommittedManager) OnDealSectorCommitted(ctx context.Context, provider address.Address, sectorNumber abi.SectorNumber, proposal market.DealProposal, publishCid cid.Cid, callback storagemarket.DealSectorCommittedCallback) error { - // Ensure callback is only called once - var once sync.Once - cb := func(err error) { - once.Do(func() { - callback(err) - }) - } - - // First check if the deal is already active, and if so, bail out - checkFunc := func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) { - _, isActive, err := mgr.checkIfDealAlreadyActive(ctx, ts, &proposal, publishCid) - if err != nil { - // Note: the error returned from here will end up being returned - // from OnDealSectorCommitted so no need to call the callback - // with the error - return false, false, err - } - - if isActive { - // Deal is already active, bail out - cb(nil) - return true, false, nil - } - - // Not yet active, start matching against incoming messages - return false, true, nil - } - - // Match a prove-commit sent to the provider with the given sector number - matchEvent := func(msg *types.Message) (matched bool, err error) { - if msg.To != provider { - return false, nil - } - - return sectorInCommitMsg(msg, sectorNumber) - } - - // The deal must be accepted by the deal proposal start epoch, so timeout - // if the chain reaches that epoch - timeoutEpoch := proposal.StartEpoch + 1 - - called := func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) { - defer func() { - if err != nil { - cb(xerrors.Errorf("handling applied event: %w", err)) - } - }() - - // If the deal hasn't been activated by the proposed start epoch, the - // deal will timeout (when msg == nil it means the timeout epoch was reached) - if msg == nil { - err := xerrors.Errorf("deal with piece CID %s was not activated by proposed deal start epoch %d", proposal.PieceCID, proposal.StartEpoch) - return false, err - } - - // Ignore the prove-commit message if it was not executed successfully - if rec.ExitCode != 0 { - return true, nil - } - - // Get the deal info - res, err := mgr.dealInfo.GetCurrentDealInfo(ctx, ts.Key(), &proposal, publishCid) - if err != nil { - return false, xerrors.Errorf("failed to look up deal on chain: %w", err) - } - - // Make sure the deal is active - if res.MarketDeal.State.SectorStartEpoch < 1 { - return false, xerrors.Errorf("deal wasn't active: deal=%d, parentState=%s, h=%d", res.DealID, ts.ParentState(), ts.Height()) - } - - log.Infof("Storage deal %d activated at epoch %d", res.DealID, res.MarketDeal.State.SectorStartEpoch) - - cb(nil) - - return false, nil - } - - revert := func(ctx context.Context, ts *types.TipSet) error { - log.Warn("deal activation reverted; TODO: actually handle this!") - // TODO: Just go back to DealSealing? 
- return nil - } - - if err := mgr.ev.Called(ctx, checkFunc, called, revert, int(build.MessageConfidence+1), timeoutEpoch, matchEvent); err != nil { - return xerrors.Errorf("failed to set up called handler: %w", err) - } - - return nil -} - -func dealSectorInReplicaUpdateSuccess(msg *types.Message, rec *types.MessageReceipt, res pipeline.CurrentDealInfo) (*abi.SectorNumber, error) { - var params miner.ProveReplicaUpdatesParams - if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return nil, xerrors.Errorf("unmarshal prove replica update: %w", err) - } - - var seekUpdate miner.ReplicaUpdate - var found bool - for _, update := range params.Updates { - for _, did := range update.Deals { - if did == res.DealID { - seekUpdate = update - found = true - break - } - } - } - if !found { - return nil, nil - } - - // check that this update passed validation steps - var successBf bitfield.BitField - if err := successBf.UnmarshalCBOR(bytes.NewReader(rec.Return)); err != nil { - return nil, xerrors.Errorf("unmarshal return value: %w", err) - } - success, err := successBf.IsSet(uint64(seekUpdate.SectorID)) - if err != nil { - return nil, xerrors.Errorf("failed to check success of replica update: %w", err) - } - if !success { - return nil, xerrors.Errorf("replica update %d failed", seekUpdate.SectorID) - } - return &seekUpdate.SectorID, nil -} - -// dealSectorInPreCommitMsg tries to find a sector containing the specified deal -func dealSectorInPreCommitMsg(msg *types.Message, res pipeline.CurrentDealInfo) (*abi.SectorNumber, error) { - switch msg.Method { - case builtin.MethodsMiner.PreCommitSector: - var params miner.SectorPreCommitInfo - if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return nil, xerrors.Errorf("unmarshal pre commit: %w", err) - } - - // Check through the deal IDs associated with this message - for _, did := range params.DealIDs { - if did == res.DealID { - // Found the deal ID in this message. Callback with the sector ID. - return ¶ms.SectorNumber, nil - } - } - case builtin.MethodsMiner.PreCommitSectorBatch: - var params miner.PreCommitSectorBatchParams - if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return nil, xerrors.Errorf("unmarshal pre commit: %w", err) - } - - for _, precommit := range params.Sectors { - // Check through the deal IDs associated with this message - for _, did := range precommit.DealIDs { - if did == res.DealID { - // Found the deal ID in this message. Callback with the sector ID. 
- return &precommit.SectorNumber, nil - } - } - } - default: - return nil, xerrors.Errorf("unexpected method %d", msg.Method) - } - - return nil, nil -} - -// sectorInCommitMsg checks if the provided message commits specified sector -func sectorInCommitMsg(msg *types.Message, sectorNumber abi.SectorNumber) (bool, error) { - switch msg.Method { - case builtin.MethodsMiner.ProveCommitSector: - var params miner.ProveCommitSectorParams - if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err) - } - - return params.SectorNumber == sectorNumber, nil - - case builtin.MethodsMiner.ProveCommitAggregate: - var params miner.ProveCommitAggregateParams - if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err) - } - - set, err := params.SectorNumbers.IsSet(uint64(sectorNumber)) - if err != nil { - return false, xerrors.Errorf("checking if sectorNumber is set in commit aggregate message: %w", err) - } - - return set, nil - - default: - return false, nil - } -} - -func (mgr *SectorCommittedManager) checkIfDealAlreadyActive(ctx context.Context, ts *types.TipSet, proposal *market.DealProposal, publishCid cid.Cid) (pipeline.CurrentDealInfo, bool, error) { - res, err := mgr.dealInfo.GetCurrentDealInfo(ctx, ts.Key(), proposal, publishCid) - if err != nil { - // TODO: This may be fine for some errors - return res, false, xerrors.Errorf("failed to look up deal on chain: %w", err) - } - - // Sector was slashed - if res.MarketDeal.State.SlashEpoch > 0 { - return res, false, xerrors.Errorf("deal %d was slashed at epoch %d", res.DealID, res.MarketDeal.State.SlashEpoch) - } - - // Sector with deal is already active - if res.MarketDeal.State.SectorStartEpoch > 0 { - return res, true, nil - } - - return res, false, nil -} diff --git a/markets/storageadapter/ondealsectorcommitted_test.go b/markets/storageadapter/ondealsectorcommitted_test.go deleted file mode 100644 index 6edefc2aa..000000000 --- a/markets/storageadapter/ondealsectorcommitted_test.go +++ /dev/null @@ -1,581 +0,0 @@ -// stm: #unit -package storageadapter - -import ( - "bytes" - "context" - "errors" - "fmt" - "math/rand" - "testing" - "time" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/builtin" - markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market" - minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" - "github.com/filecoin-project/go-state-types/cbor" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors/builtin/market" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/events" - test "github.com/filecoin-project/lotus/chain/events/state/mock" - "github.com/filecoin-project/lotus/chain/types" - pipeline "github.com/filecoin-project/lotus/storage/pipeline" - tutils "github.com/filecoin-project/specs-actors/v2/support/testing" - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - "golang.org/x/xerrors" -) - -func TestOnDealSectorPreCommitted(t *testing.T) { - label, err := markettypes.NewLabelFromString("success") - require.NoError(t, err) - - provider := address.TestAddress - ctx := context.Background() - publishCid := generateCids(1)[0] - sealedCid := 
generateCids(1)[0] - pieceCid := generateCids(1)[0] - dealID := abi.DealID(rand.Uint64()) - sectorNumber := abi.SectorNumber(rand.Uint64()) - proposal := market.DealProposal{ - PieceCID: pieceCid, - PieceSize: abi.PaddedPieceSize(rand.Uint64()), - Client: tutils.NewActorAddr(t, "client"), - Provider: tutils.NewActorAddr(t, "provider"), - StoragePricePerEpoch: abi.NewTokenAmount(1), - ProviderCollateral: abi.NewTokenAmount(1), - ClientCollateral: abi.NewTokenAmount(1), - Label: label, - } - unfinishedDeal := &api.MarketDeal{ - Proposal: proposal, - State: market.DealState{ - SectorStartEpoch: -1, - LastUpdatedEpoch: 2, - }, - } - activeDeal := &api.MarketDeal{ - Proposal: proposal, - State: market.DealState{ - SectorStartEpoch: 1, - LastUpdatedEpoch: 2, - }, - } - slashedDeal := &api.MarketDeal{ - Proposal: proposal, - State: market.DealState{ - SectorStartEpoch: 1, - LastUpdatedEpoch: 2, - SlashEpoch: 2, - }, - } - type testCase struct { - currentDealInfo pipeline.CurrentDealInfo - currentDealInfoErr error - currentDealInfoErr2 error - preCommitDiff *miner.PreCommitChanges - matchStates []matchState - dealStartEpochTimeout bool - expectedCBCallCount uint64 - expectedCBSectorNumber abi.SectorNumber - expectedCBIsActive bool - expectedCBError error - expectedError error - } - testCases := map[string]testCase{ - "normal sequence": { - currentDealInfo: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: unfinishedDeal, - }, - matchStates: []matchState{ - { - msg: makeMessage(t, provider, builtin.MethodsMiner.PreCommitSector, &minertypes.PreCommitSectorParams{ - SectorNumber: sectorNumber, - SealedCID: sealedCid, - DealIDs: []abi.DealID{dealID}, - }), - }, - }, - expectedCBCallCount: 1, - expectedCBIsActive: false, - expectedCBSectorNumber: sectorNumber, - }, - "ignores unsuccessful pre-commit message": { - currentDealInfo: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: unfinishedDeal, - }, - matchStates: []matchState{ - { - msg: makeMessage(t, provider, builtin.MethodsMiner.PreCommitSector, &minertypes.PreCommitSectorParams{ - SectorNumber: sectorNumber, - SealedCID: sealedCid, - DealIDs: []abi.DealID{dealID}, - }), - // non-zero exit code indicates unsuccessful pre-commit message - receipt: &types.MessageReceipt{ExitCode: 1}, - }, - }, - expectedCBCallCount: 0, - }, - "deal already pre-committed": { - currentDealInfo: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: unfinishedDeal, - }, - preCommitDiff: &miner.PreCommitChanges{ - Added: []minertypes.SectorPreCommitOnChainInfo{{ - Info: minertypes.SectorPreCommitInfo{ - SectorNumber: sectorNumber, - DealIDs: []abi.DealID{dealID}, - }, - }}, - }, - expectedCBCallCount: 1, - expectedCBIsActive: false, - expectedCBSectorNumber: sectorNumber, - }, - "error getting current deal info in check func": { - currentDealInfoErr: errors.New("something went wrong"), - expectedCBCallCount: 0, - expectedError: xerrors.Errorf("failed to set up called handler: failed to check deal activity: failed to look up deal on chain: something went wrong"), - }, - "sector already active": { - currentDealInfo: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: activeDeal, - }, - expectedCBCallCount: 1, - expectedCBIsActive: true, - }, - "sector was slashed": { - currentDealInfo: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: slashedDeal, - PublishMsgTipSet: types.EmptyTSK, - }, - expectedCBCallCount: 0, - expectedError: xerrors.Errorf("failed to set up called handler: failed to check deal activity: deal %d was slashed at epoch 
%d", dealID, slashedDeal.State.SlashEpoch), - }, - "error getting current deal info in called func": { - currentDealInfo: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: unfinishedDeal, - }, - currentDealInfoErr2: errors.New("something went wrong"), - matchStates: []matchState{ - { - msg: makeMessage(t, provider, builtin.MethodsMiner.PreCommitSector, &minertypes.PreCommitSectorParams{ - SectorNumber: sectorNumber, - SealedCID: sealedCid, - DealIDs: []abi.DealID{dealID}, - }), - }, - }, - expectedCBCallCount: 1, - expectedCBError: errors.New("handling applied event: failed to get dealinfo: something went wrong"), - }, - "proposed deal epoch timeout": { - currentDealInfo: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: activeDeal, - }, - dealStartEpochTimeout: true, - expectedCBCallCount: 1, - expectedCBError: xerrors.Errorf("handling applied event: deal with piece CID %s was not activated by proposed deal start epoch 0", unfinishedDeal.Proposal.PieceCID), - }, - } - runTestCase := func(testCase string, data testCase) { - t.Run(testCase, func(t *testing.T) { - checkTs, err := test.MockTipset(provider, rand.Uint64()) - require.NoError(t, err) - matchMessages := make([]matchMessage, len(data.matchStates)) - for i, ms := range data.matchStates { - matchTs, err := test.MockTipset(provider, rand.Uint64()) - require.NoError(t, err) - matchMessages[i] = matchMessage{ - curH: 5, - msg: ms.msg, - msgReceipt: ms.receipt, - ts: matchTs, - } - } - eventsAPI := &fakeEvents{ - Ctx: ctx, - CheckTs: checkTs, - MatchMessages: matchMessages, - DealStartEpochTimeout: data.dealStartEpochTimeout, - } - cbCallCount := uint64(0) - var cbSectorNumber abi.SectorNumber - var cbIsActive bool - var cbError error - cb := func(secNum abi.SectorNumber, isActive bool, err error) { - cbCallCount++ - cbSectorNumber = secNum - cbIsActive = isActive - cbError = err - } - - mockPCAPI := &mockPreCommitsAPI{ - PCChanges: data.preCommitDiff, - } - mockDIAPI := &mockDealInfoAPI{ - CurrentDealInfo: data.currentDealInfo, - CurrentDealInfo2: data.currentDealInfo, - Err: data.currentDealInfoErr, - Err2: data.currentDealInfoErr2, - } - scm := newSectorCommittedManager(eventsAPI, mockDIAPI, mockPCAPI) - //stm: @MARKET_ADAPTER_ON_SECTOR_PRE_COMMIT_001 - err = scm.OnDealSectorPreCommitted(ctx, provider, proposal, publishCid, cb) - if data.expectedError == nil { - require.NoError(t, err) - } else { - require.EqualError(t, err, data.expectedError.Error()) - } - require.Equal(t, data.expectedCBSectorNumber, cbSectorNumber) - require.Equal(t, data.expectedCBIsActive, cbIsActive) - require.Equal(t, data.expectedCBCallCount, cbCallCount) - if data.expectedCBError == nil { - require.NoError(t, cbError) - } else { - require.EqualError(t, cbError, data.expectedCBError.Error()) - } - }) - } - for testCase, data := range testCases { - runTestCase(testCase, data) - } -} - -func TestOnDealSectorCommitted(t *testing.T) { - label, err := markettypes.NewLabelFromString("success") - require.NoError(t, err) - - provider := address.TestAddress - publishCid := generateCids(1)[0] - pieceCid := generateCids(1)[0] - dealID := abi.DealID(rand.Uint64()) - sectorNumber := abi.SectorNumber(rand.Uint64()) - proposal := market.DealProposal{ - PieceCID: pieceCid, - PieceSize: abi.PaddedPieceSize(rand.Uint64()), - Client: tutils.NewActorAddr(t, "client"), - Provider: tutils.NewActorAddr(t, "provider"), - StoragePricePerEpoch: abi.NewTokenAmount(1), - ProviderCollateral: abi.NewTokenAmount(1), - ClientCollateral: abi.NewTokenAmount(1), - Label: label, 
- } - unfinishedDeal := &api.MarketDeal{ - Proposal: proposal, - State: market.DealState{ - SectorStartEpoch: -1, - LastUpdatedEpoch: 2, - }, - } - activeDeal := &api.MarketDeal{ - Proposal: proposal, - State: market.DealState{ - SectorStartEpoch: 1, - LastUpdatedEpoch: 2, - }, - } - slashedDeal := &api.MarketDeal{ - Proposal: proposal, - State: market.DealState{ - SectorStartEpoch: 1, - LastUpdatedEpoch: 2, - SlashEpoch: 2, - }, - } - type testCase struct { - currentDealInfo pipeline.CurrentDealInfo - currentDealInfoErr error - currentDealInfo2 pipeline.CurrentDealInfo - currentDealInfoErr2 error - matchStates []matchState - dealStartEpochTimeout bool - expectedCBCallCount uint64 - expectedCBError error - expectedError error - } - testCases := map[string]testCase{ - "normal sequence": { - currentDealInfo: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: unfinishedDeal, - }, - currentDealInfo2: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: activeDeal, - }, - matchStates: []matchState{ - { - msg: makeMessage(t, provider, builtin.MethodsMiner.ProveCommitSector, &minertypes.ProveCommitSectorParams{ - SectorNumber: sectorNumber, - }), - }, - }, - expectedCBCallCount: 1, - }, - "ignores unsuccessful prove-commit message": { - currentDealInfo: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: unfinishedDeal, - }, - currentDealInfo2: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: activeDeal, - }, - matchStates: []matchState{ - { - msg: makeMessage(t, provider, builtin.MethodsMiner.ProveCommitSector, &minertypes.ProveCommitSectorParams{ - SectorNumber: sectorNumber, - }), - // Exit-code 1 means the prove-commit was unsuccessful - receipt: &types.MessageReceipt{ExitCode: 1}, - }, - }, - expectedCBCallCount: 0, - }, - "error getting current deal info in check func": { - currentDealInfoErr: errors.New("something went wrong"), - expectedCBCallCount: 0, - expectedError: xerrors.Errorf("failed to set up called handler: failed to look up deal on chain: something went wrong"), - }, - "sector already active": { - currentDealInfo: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: activeDeal, - }, - expectedCBCallCount: 1, - }, - "sector was slashed": { - currentDealInfo: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: slashedDeal, - }, - expectedCBCallCount: 0, - expectedError: xerrors.Errorf("failed to set up called handler: deal %d was slashed at epoch %d", dealID, slashedDeal.State.SlashEpoch), - }, - "error getting current deal info in called func": { - currentDealInfo: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: unfinishedDeal, - }, - currentDealInfoErr2: errors.New("something went wrong"), - matchStates: []matchState{ - { - msg: makeMessage(t, provider, builtin.MethodsMiner.ProveCommitSector, &minertypes.ProveCommitSectorParams{ - SectorNumber: sectorNumber, - }), - }, - }, - expectedCBCallCount: 1, - expectedCBError: xerrors.Errorf("handling applied event: failed to look up deal on chain: something went wrong"), - }, - "proposed deal epoch timeout": { - currentDealInfo: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: unfinishedDeal, - }, - dealStartEpochTimeout: true, - expectedCBCallCount: 1, - expectedCBError: xerrors.Errorf("handling applied event: deal with piece CID %s was not activated by proposed deal start epoch 0", unfinishedDeal.Proposal.PieceCID), - }, - "got prove-commit but deal not active": { - currentDealInfo: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: unfinishedDeal, - }, - 
currentDealInfo2: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: unfinishedDeal, - }, - matchStates: []matchState{ - { - msg: makeMessage(t, provider, builtin.MethodsMiner.ProveCommitSector, &minertypes.ProveCommitSectorParams{ - SectorNumber: sectorNumber, - }), - }, - }, - expectedCBCallCount: 1, - expectedCBError: xerrors.Errorf("handling applied event: deal wasn't active: deal=%d, parentState=bafkqaaa, h=5", dealID), - }, - } - runTestCase := func(testCase string, data testCase) { - t.Run(testCase, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - checkTs, err := test.MockTipset(provider, rand.Uint64()) - require.NoError(t, err) - matchMessages := make([]matchMessage, len(data.matchStates)) - for i, ms := range data.matchStates { - matchTs, err := test.MockTipset(provider, rand.Uint64()) - require.NoError(t, err) - matchMessages[i] = matchMessage{ - curH: 5, - msg: ms.msg, - msgReceipt: ms.receipt, - ts: matchTs, - } - } - eventsAPI := &fakeEvents{ - Ctx: ctx, - CheckTs: checkTs, - MatchMessages: matchMessages, - DealStartEpochTimeout: data.dealStartEpochTimeout, - } - cbCallCount := uint64(0) - var cbError error - cb := func(err error) { - cbCallCount++ - cbError = err - } - mockPCAPI := &mockPreCommitsAPI{} - mockDIAPI := &mockDealInfoAPI{ - CurrentDealInfo: data.currentDealInfo, - CurrentDealInfo2: data.currentDealInfo2, - Err: data.currentDealInfoErr, - Err2: data.currentDealInfoErr2, - } - scm := newSectorCommittedManager(eventsAPI, mockDIAPI, mockPCAPI) - //stm: @MARKET_ADAPTER_ON_SECTOR_COMMIT_001 - err = scm.OnDealSectorCommitted(ctx, provider, sectorNumber, proposal, publishCid, cb) - if data.expectedError == nil { - require.NoError(t, err) - } else { - require.EqualError(t, err, data.expectedError.Error()) - } - require.Equal(t, data.expectedCBCallCount, cbCallCount) - if data.expectedCBError == nil { - require.NoError(t, cbError) - } else { - require.EqualError(t, cbError, data.expectedCBError.Error()) - } - }) - } - for testCase, data := range testCases { - runTestCase(testCase, data) - } -} - -type matchState struct { - msg *types.Message - receipt *types.MessageReceipt -} - -type matchMessage struct { - curH abi.ChainEpoch - msg *types.Message - msgReceipt *types.MessageReceipt - ts *types.TipSet - doesRevert bool -} -type fakeEvents struct { - Ctx context.Context - CheckTs *types.TipSet - MatchMessages []matchMessage - DealStartEpochTimeout bool -} - -func (fe *fakeEvents) Called(ctx context.Context, check events.CheckFunc, msgHnd events.MsgHandler, rev events.RevertHandler, confidence int, timeout abi.ChainEpoch, mf events.MsgMatchFunc) error { - if fe.DealStartEpochTimeout { - msgHnd(nil, nil, nil, 100) // nolint:errcheck - return nil - } - - _, more, err := check(ctx, fe.CheckTs) - if err != nil { - return err - } - if !more { - return nil - } - for _, matchMessage := range fe.MatchMessages { - matched, err := mf(matchMessage.msg) - if err != nil { - return err - } - if matched { - receipt := matchMessage.msgReceipt - if receipt == nil { - receipt = &types.MessageReceipt{ExitCode: 0} - } - more, err := msgHnd(matchMessage.msg, receipt, matchMessage.ts, matchMessage.curH) - if err != nil { - // error is handled through a callback rather than being returned - return nil - } - if matchMessage.doesRevert { - err := rev(ctx, matchMessage.ts) - if err != nil { - return err - } - } - if !more { - return nil - } - } - } - return nil -} - -func makeMessage(t *testing.T, to address.Address, method 
abi.MethodNum, params cbor.Marshaler) *types.Message { - buf := new(bytes.Buffer) - err := params.MarshalCBOR(buf) - require.NoError(t, err) - return &types.Message{ - To: to, - Method: method, - Params: buf.Bytes(), - } -} - -var seq int - -func generateCids(n int) []cid.Cid { - cids := make([]cid.Cid, 0, n) - for i := 0; i < n; i++ { - c := blocks.NewBlock([]byte(fmt.Sprint(seq))).Cid() - seq++ - cids = append(cids, c) - } - return cids -} - -type mockPreCommitsAPI struct { - PCChanges *miner.PreCommitChanges - Err error -} - -func (m *mockPreCommitsAPI) diffPreCommits(ctx context.Context, actor address.Address, pre, cur types.TipSetKey) (*miner.PreCommitChanges, error) { - pcc := &miner.PreCommitChanges{} - if m.PCChanges != nil { - pcc = m.PCChanges - } - return pcc, m.Err -} - -type mockDealInfoAPI struct { - count int - CurrentDealInfo pipeline.CurrentDealInfo - Err error - CurrentDealInfo2 pipeline.CurrentDealInfo - Err2 error -} - -func (m *mockDealInfoAPI) GetCurrentDealInfo(ctx context.Context, tsk types.TipSetKey, proposal *market.DealProposal, publishCid cid.Cid) (pipeline.CurrentDealInfo, error) { - m.count++ - if m.count == 2 { - return m.CurrentDealInfo2, m.Err2 - } - return m.CurrentDealInfo, m.Err -} diff --git a/markets/storageadapter/provider.go b/markets/storageadapter/provider.go deleted file mode 100644 index 392f5eb81..000000000 --- a/markets/storageadapter/provider.go +++ /dev/null @@ -1,438 +0,0 @@ -package storageadapter - -// this file implements storagemarket.StorageProviderNode - -import ( - "context" - "errors" - "time" - - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - "go.uber.org/fx" - "golang.org/x/xerrors" - - "github.com/filecoin-project/boost-gfm/shared" - "github.com/filecoin-project/boost-gfm/storagemarket" - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market" - "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/go-state-types/exitcode" - - "github.com/filecoin-project/boost/markets/utils" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/v1api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/builtin/market" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/events" - "github.com/filecoin-project/lotus/chain/events/state" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/sigs" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/node/modules/helpers" - pipeline "github.com/filecoin-project/lotus/storage/pipeline" - "github.com/filecoin-project/lotus/storage/sectorblocks" -) - -var addPieceRetryWait = 5 * time.Minute -var addPieceRetryTimeout = 6 * time.Hour -var defaultMaxProviderCollateralMultiplier = uint64(2) -var log = logging.Logger("storageadapter") - -type ProviderNodeAdapter struct { - v1api.FullNode - - secb *sectorblocks.SectorBlocks - ev *events.Events - - dealPublisher *DealPublisher - - addBalanceSpec *api.MessageSendSpec - maxDealCollateralMultiplier uint64 - dsMatcher *dealStateMatcher - scMgr *SectorCommittedManager -} - -func NewProviderNodeAdapter(fc *config.MinerFeeConfig, dc *config.DealmakingConfig) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, secb *sectorblocks.SectorBlocks, full v1api.FullNode, dealPublisher *DealPublisher) 
(storagemarket.StorageProviderNode, error) { - return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, secb *sectorblocks.SectorBlocks, full v1api.FullNode, dealPublisher *DealPublisher) (storagemarket.StorageProviderNode, error) { - ctx := helpers.LifecycleCtx(mctx, lc) - - ev, err := events.NewEvents(ctx, full) - if err != nil { - return nil, err - } - na := &ProviderNodeAdapter{ - FullNode: full, - - secb: secb, - ev: ev, - dealPublisher: dealPublisher, - dsMatcher: newDealStateMatcher(state.NewStatePredicates(state.WrapFastAPI(full))), - } - if fc != nil { - na.addBalanceSpec = &api.MessageSendSpec{MaxFee: abi.TokenAmount(fc.MaxMarketBalanceAddFee)} - } - na.maxDealCollateralMultiplier = defaultMaxProviderCollateralMultiplier - if dc != nil { - na.maxDealCollateralMultiplier = dc.MaxProviderCollateralMultiplier - } - na.scMgr = NewSectorCommittedManager(ev, na, &apiWrapper{api: full}) - - return na, nil - } -} - -func (n *ProviderNodeAdapter) PublishDeals(ctx context.Context, deal storagemarket.MinerDeal) (cid.Cid, error) { - return n.dealPublisher.Publish(ctx, deal.ClientDealProposal) -} - -func (n *ProviderNodeAdapter) OnDealComplete(ctx context.Context, deal storagemarket.MinerDeal, pieceSize abi.UnpaddedPieceSize, pieceData shared.ReadSeekStarter) (*storagemarket.PackingResult, error) { - if deal.PublishCid == nil { - return nil, xerrors.Errorf("deal.PublishCid can't be nil") - } - - sdInfo := api.PieceDealInfo{ - DealID: deal.DealID, - DealProposal: &deal.Proposal, - PublishCid: deal.PublishCid, - DealSchedule: api.DealSchedule{ - StartEpoch: deal.ClientDealProposal.Proposal.StartEpoch, - EndEpoch: deal.ClientDealProposal.Proposal.EndEpoch, - }, - KeepUnsealed: deal.FastRetrieval, - } - - // Attempt to add the piece to the sector - p, offset, err := n.secb.AddPiece(ctx, pieceSize, pieceData, sdInfo) - curTime := build.Clock.Now() - for build.Clock.Since(curTime) < addPieceRetryTimeout { - // Check if there was an error because of too many sectors being sealed - if !errors.Is(err, pipeline.ErrTooManySectorsSealing) { - if err != nil { - log.Errorf("failed to addPiece for deal %d, err: %v", deal.DealID, err) - } - - // There was either a fatal error or no error. 
In either case - // don't retry AddPiece - break - } - - // The piece could not be added to the sector because there are too - // many sectors being sealed, back-off for a while before trying again - select { - case <-build.Clock.After(addPieceRetryWait): - // Reset the reader to the start - err = pieceData.SeekStart() - if err != nil { - return nil, xerrors.Errorf("failed to reset piece reader to start before retrying AddPiece for deal %d: %w", deal.DealID, err) - } - - // Attempt to add the piece again - p, offset, err = n.secb.AddPiece(ctx, pieceSize, pieceData, sdInfo) - case <-ctx.Done(): - return nil, xerrors.New("context expired while waiting to retry AddPiece") - } - } - - if err != nil { - return nil, xerrors.Errorf("AddPiece failed: %s", err) - } - log.Warnf("New Deal: deal %d", deal.DealID) - - return &storagemarket.PackingResult{ - SectorNumber: p, - Offset: offset, - Size: pieceSize.Padded(), - }, nil -} - -func (n *ProviderNodeAdapter) VerifySignature(ctx context.Context, sig crypto.Signature, addr address.Address, input []byte, encodedTs shared.TipSetToken) (bool, error) { - addr, err := n.StateAccountKey(ctx, addr, types.EmptyTSK) - if err != nil { - return false, err - } - - err = sigs.Verify(&sig, addr, input) - return err == nil, err -} - -func (n *ProviderNodeAdapter) GetMinerWorkerAddress(ctx context.Context, maddr address.Address, tok shared.TipSetToken) (address.Address, error) { - tsk, err := types.TipSetKeyFromBytes(tok) - if err != nil { - return address.Undef, err - } - - mi, err := n.StateMinerInfo(ctx, maddr, tsk) - if err != nil { - return address.Address{}, err - } - return mi.Worker, nil -} - -func (n *ProviderNodeAdapter) GetProofType(ctx context.Context, maddr address.Address, tok shared.TipSetToken) (abi.RegisteredSealProof, error) { - tsk, err := types.TipSetKeyFromBytes(tok) - if err != nil { - return 0, err - } - - mi, err := n.StateMinerInfo(ctx, maddr, tsk) - if err != nil { - return 0, err - } - - nver, err := n.StateNetworkVersion(ctx, tsk) - if err != nil { - return 0, err - } - - return miner.PreferredSealProofTypeFromWindowPoStType(nver, mi.WindowPoStProofType) -} - -func (n *ProviderNodeAdapter) SignBytes(ctx context.Context, signer address.Address, b []byte) (*crypto.Signature, error) { - signer, err := n.StateAccountKey(ctx, signer, types.EmptyTSK) - if err != nil { - return nil, err - } - - localSignature, err := n.WalletSign(ctx, signer, b) - if err != nil { - return nil, err - } - return localSignature, nil -} - -func (n *ProviderNodeAdapter) ReserveFunds(ctx context.Context, wallet, addr address.Address, amt abi.TokenAmount) (cid.Cid, error) { - return n.MarketReserveFunds(ctx, wallet, addr, amt) -} - -func (n *ProviderNodeAdapter) ReleaseFunds(ctx context.Context, addr address.Address, amt abi.TokenAmount) error { - return n.MarketReleaseFunds(ctx, addr, amt) -} - -// Adds funds with the StorageMinerActor for a storage participant. Used by both providers and clients. 
-func (n *ProviderNodeAdapter) AddFunds(ctx context.Context, addr address.Address, amount abi.TokenAmount) (cid.Cid, error) { - // (Provider Node API) - smsg, err := n.MpoolPushMessage(ctx, &types.Message{ - To: market.Address, - From: addr, - Value: amount, - Method: market.Methods.AddBalance, - }, n.addBalanceSpec) - if err != nil { - return cid.Undef, err - } - - return smsg.Cid(), nil -} - -func (n *ProviderNodeAdapter) GetBalance(ctx context.Context, addr address.Address, encodedTs shared.TipSetToken) (storagemarket.Balance, error) { - tsk, err := types.TipSetKeyFromBytes(encodedTs) - if err != nil { - return storagemarket.Balance{}, err - } - - bal, err := n.StateMarketBalance(ctx, addr, tsk) - if err != nil { - return storagemarket.Balance{}, err - } - - return utils.ToSharedBalance(bal), nil -} - -// TODO: why doesnt this method take in a sector ID? -func (n *ProviderNodeAdapter) LocatePieceForDealWithinSector(ctx context.Context, dealID abi.DealID, encodedTs shared.TipSetToken) (sectorID abi.SectorNumber, offset abi.PaddedPieceSize, length abi.PaddedPieceSize, err error) { - refs, err := n.secb.GetRefs(ctx, dealID) - if err != nil { - return 0, 0, 0, err - } - if len(refs) == 0 { - return 0, 0, 0, xerrors.New("no sector information for deal ID") - } - - // TODO: better strategy (e.g. look for already unsealed) - var best api.SealedRef - var bestSi api.SectorInfo - for _, r := range refs { - si, err := n.secb.SectorBuilder.SectorsStatus(ctx, r.SectorID, false) - if err != nil { - return 0, 0, 0, xerrors.Errorf("getting sector info: %w", err) - } - if si.State == api.SectorState(pipeline.Proving) { - best = r - bestSi = si - break - } - } - if bestSi.State == api.SectorState(pipeline.UndefinedSectorState) { - return 0, 0, 0, xerrors.New("no sealed sector found") - } - return best.SectorID, best.Offset, best.Size.Padded(), nil -} - -func (n *ProviderNodeAdapter) DealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, isVerified bool) (abi.TokenAmount, abi.TokenAmount, error) { - bounds, err := n.StateDealProviderCollateralBounds(ctx, size, isVerified, types.EmptyTSK) - if err != nil { - return abi.TokenAmount{}, abi.TokenAmount{}, err - } - - // The maximum amount of collateral that the provider will put into escrow - // for a deal is calculated as a multiple of the minimum bounded amount - max := types.BigMul(bounds.Min, types.NewInt(n.maxDealCollateralMultiplier)) - - return bounds.Min, max, nil -} - -// TODO: Remove dealID parameter, change publishCid to be cid.Cid (instead of pointer) -func (n *ProviderNodeAdapter) OnDealSectorPreCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, proposal markettypes.DealProposal, publishCid *cid.Cid, cb storagemarket.DealSectorPreCommittedCallback) error { - return n.scMgr.OnDealSectorPreCommitted(ctx, provider, proposal, *publishCid, cb) -} - -// TODO: Remove dealID parameter, change publishCid to be cid.Cid (instead of pointer) -func (n *ProviderNodeAdapter) OnDealSectorCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, sectorNumber abi.SectorNumber, proposal markettypes.DealProposal, publishCid *cid.Cid, cb storagemarket.DealSectorCommittedCallback) error { - return n.scMgr.OnDealSectorCommitted(ctx, provider, sectorNumber, proposal, *publishCid, cb) -} - -func (n *ProviderNodeAdapter) GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) { - head, err := n.ChainHead(ctx) - if err != nil { - return nil, 0, err - } - - return head.Key().Bytes(), 
head.Height(), nil -} - -func (n *ProviderNodeAdapter) WaitForMessage(ctx context.Context, mcid cid.Cid, cb func(code exitcode.ExitCode, bytes []byte, finalCid cid.Cid, err error) error) error { - receipt, err := n.StateWaitMsg(ctx, mcid, 2*build.MessageConfidence, api.LookbackNoLimit, true) - if err != nil { - return cb(0, nil, cid.Undef, err) - } - return cb(receipt.Receipt.ExitCode, receipt.Receipt.Return, receipt.Message, nil) -} - -func (n *ProviderNodeAdapter) WaitForPublishDeals(ctx context.Context, publishCid cid.Cid, proposal markettypes.DealProposal) (*storagemarket.PublishDealsWaitResult, error) { - // Wait for deal to be published (plus additional time for confidence) - receipt, err := n.StateWaitMsg(ctx, publishCid, 2*build.MessageConfidence, api.LookbackNoLimit, true) - if err != nil { - return nil, xerrors.Errorf("WaitForPublishDeals errored: %w", err) - } - if receipt.Receipt.ExitCode != exitcode.Ok { - return nil, xerrors.Errorf("WaitForPublishDeals exit code: %s", receipt.Receipt.ExitCode) - } - - // The deal ID may have changed since publish if there was a reorg, so - // get the current deal ID - head, err := n.ChainHead(ctx) - if err != nil { - return nil, xerrors.Errorf("WaitForPublishDeals failed to get chain head: %w", err) - } - - res, err := n.scMgr.dealInfo.GetCurrentDealInfo(ctx, head.Key(), &proposal, publishCid) - if err != nil { - return nil, xerrors.Errorf("WaitForPublishDeals getting deal info errored: %w", err) - } - - return &storagemarket.PublishDealsWaitResult{DealID: res.DealID, FinalCid: receipt.Message}, nil -} - -func (n *ProviderNodeAdapter) GetDataCap(ctx context.Context, addr address.Address, encodedTs shared.TipSetToken) (*abi.StoragePower, error) { - tsk, err := types.TipSetKeyFromBytes(encodedTs) - if err != nil { - return nil, err - } - - sp, err := n.StateVerifiedClientStatus(ctx, addr, tsk) - return sp, err -} - -func (n *ProviderNodeAdapter) OnDealExpiredOrSlashed(ctx context.Context, dealID abi.DealID, onDealExpired storagemarket.DealExpiredCallback, onDealSlashed storagemarket.DealSlashedCallback) error { - head, err := n.ChainHead(ctx) - if err != nil { - return xerrors.Errorf("client: failed to get chain head: %w", err) - } - - sd, err := n.StateMarketStorageDeal(ctx, dealID, head.Key()) - if err != nil { - return xerrors.Errorf("client: failed to look up deal %d on chain: %w", dealID, err) - } - - // Called immediately to check if the deal has already expired or been slashed - checkFunc := func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) { - if ts == nil { - // keep listening for events - return false, true, nil - } - - // Check if the deal has already expired - if sd.Proposal.EndEpoch <= ts.Height() { - onDealExpired(nil) - return true, false, nil - } - - // If there is no deal assume it's already been slashed - if sd.State.SectorStartEpoch < 0 { - onDealSlashed(ts.Height(), nil) - return true, false, nil - } - - // No events have occurred yet, so return - // done: false, more: true (keep listening for events) - return false, true, nil - } - - // Called when there was a match against the state change we're looking for - // and the chain has advanced to the confidence height - stateChanged := func(ts *types.TipSet, ts2 *types.TipSet, states events.StateChange, h abi.ChainEpoch) (more bool, err error) { - // Check if the deal has already expired - if ts2 == nil || sd.Proposal.EndEpoch <= ts2.Height() { - onDealExpired(nil) - return false, nil - } - - // Timeout waiting for state change - if states == nil { - 
log.Error("timed out waiting for deal expiry") - return false, nil - } - - changedDeals, ok := states.(state.ChangedDeals) - if !ok { - panic("Expected state.ChangedDeals") - } - - deal, ok := changedDeals[dealID] - if !ok { - // No change to deal - return true, nil - } - - // Deal was slashed - if deal.To == nil { - onDealSlashed(ts2.Height(), nil) - return false, nil - } - - return true, nil - } - - // Called when there was a chain reorg and the state change was reverted - revert := func(ctx context.Context, ts *types.TipSet) error { - // TODO: Is it ok to just ignore this? - log.Warn("deal state reverted; TODO: actually handle this!") - return nil - } - - // Watch for state changes to the deal - match := n.dsMatcher.matcher(ctx, dealID) - - // Wait until after the end epoch for the deal and then timeout - timeout := (sd.Proposal.EndEpoch - head.Height()) + 1 - if err := n.ev.StateChanged(checkFunc, stateChanged, revert, int(build.MessageConfidence)+1, timeout, match); err != nil { - return xerrors.Errorf("failed to set up state changed handler: %w", err) - } - - return nil -} - -var _ storagemarket.StorageProviderNode = &ProviderNodeAdapter{} diff --git a/markets/utils/converters.go b/markets/utils/converters.go index 3ff0b0475..c6fc899b7 100644 --- a/markets/utils/converters.go +++ b/markets/utils/converters.go @@ -1,10 +1,10 @@ package utils import ( + "github.com/filecoin-project/boost/storagemarket/types/legacytypes" "github.com/libp2p/go-libp2p/core/peer" "github.com/multiformats/go-multiaddr" - "github.com/filecoin-project/boost-gfm/storagemarket" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" @@ -12,17 +12,17 @@ import ( "github.com/filecoin-project/lotus/api" ) -func NewStorageProviderInfo(address address.Address, miner address.Address, sectorSize abi.SectorSize, peer peer.ID, addrs []abi.Multiaddrs) storagemarket.StorageProviderInfo { +func NewStorageProviderInfo(address address.Address, miner address.Address, sectorSize abi.SectorSize, peer peer.ID, addrs []abi.Multiaddrs) legacytypes.StorageProviderInfo { multiaddrs := make([]multiaddr.Multiaddr, 0, len(addrs)) for _, a := range addrs { maddr, err := multiaddr.NewMultiaddrBytes(a) if err != nil { - return storagemarket.StorageProviderInfo{} + return legacytypes.StorageProviderInfo{} } multiaddrs = append(multiaddrs, maddr) } - return storagemarket.StorageProviderInfo{ + return legacytypes.StorageProviderInfo{ Address: address, Worker: miner, SectorSize: uint64(sectorSize), @@ -31,8 +31,8 @@ func NewStorageProviderInfo(address address.Address, miner address.Address, sect } } -func ToSharedBalance(bal api.MarketBalance) storagemarket.Balance { - return storagemarket.Balance{ +func ToSharedBalance(bal api.MarketBalance) legacytypes.Balance { + return legacytypes.Balance{ Locked: bal.Locked, Available: big.Sub(bal.Escrow, bal.Locked), } diff --git a/node/builder.go b/node/builder.go index 8436a913f..836886253 100644 --- a/node/builder.go +++ b/node/builder.go @@ -7,11 +7,6 @@ import ( "fmt" "time" - "github.com/filecoin-project/boost-gfm/retrievalmarket" - rmnet "github.com/filecoin-project/boost-gfm/retrievalmarket/network" - gfm_storagemarket "github.com/filecoin-project/boost-gfm/storagemarket" - storageimpl "github.com/filecoin-project/boost-gfm/storagemarket/impl" - "github.com/filecoin-project/boost-gfm/storagemarket/impl/storedask" "github.com/filecoin-project/boost/api" "github.com/filecoin-project/boost/build" 
"github.com/filecoin-project/boost/cmd/lib" @@ -22,7 +17,6 @@ import ( "github.com/filecoin-project/boost/lib/legacy" "github.com/filecoin-project/boost/lib/mpoolmonitor" "github.com/filecoin-project/boost/markets/idxprov" - "github.com/filecoin-project/boost/markets/retrievaladapter" "github.com/filecoin-project/boost/markets/storageadapter" "github.com/filecoin-project/boost/node/config" "github.com/filecoin-project/boost/node/impl" @@ -41,13 +35,14 @@ import ( "github.com/filecoin-project/boost/storagemarket" "github.com/filecoin-project/boost/storagemarket/dealfilter" "github.com/filecoin-project/boost/storagemarket/sealingpipeline" + "github.com/filecoin-project/boost/storagemarket/storedask" smtypes "github.com/filecoin-project/boost/storagemarket/types" bdclient "github.com/filecoin-project/boostd-data/client" "github.com/filecoin-project/boostd-data/shared/tracing" "github.com/filecoin-project/dagstore" "github.com/filecoin-project/go-address" - lotus_gfm_storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-statemachine/fsm" lotus_api "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/types" lotus_journal "github.com/filecoin-project/lotus/journal" @@ -55,7 +50,6 @@ import ( _ "github.com/filecoin-project/lotus/lib/sigs/bls" _ "github.com/filecoin-project/lotus/lib/sigs/secp" mdagstore "github.com/filecoin-project/lotus/markets/dagstore" - lotus_dealfilter "github.com/filecoin-project/lotus/markets/dealfilter" lotus_config "github.com/filecoin-project/lotus/node/config" lotus_common "github.com/filecoin-project/lotus/node/impl/common" lotus_net "github.com/filecoin-project/lotus/node/impl/net" @@ -439,6 +433,7 @@ var BoostNode = Options( Override(new(*db.ProposalLogsDB), modules.NewProposalLogsDB), Override(new(*db.FundsDB), modules.NewFundsDB), Override(new(*db.SectorStateDB), modules.NewSectorStateDB), + Override(new(*db.StorageAskDB), modules.NewAskDB), Override(new(*rtvllog.RetrievalLogDB), modules.NewRetrievalLogDB), ) @@ -520,7 +515,7 @@ func ConfigBoost(cfg *config.Boost) Option { Override(new(*sectorstatemgr.SectorStateMgr), sectorstatemgr.NewSectorStateMgr(cfg)), Override(new(*indexprovider.Wrapper), indexprovider.NewWrapper(cfg)), - Override(new(*legacy.LegacyDealsManager), modules.NewLegacyDealsManager), + Override(new(legacy.LegacyDealManager), modules.NewLegacyDealsManager), Override(new(*storagemarket.ChainDealManager), modules.NewChainDealManager), Override(new(smtypes.CommpCalculator), From(new(lotus_modules.MinerStorageService))), @@ -539,27 +534,19 @@ func ConfigBoost(cfg *config.Boost) Option { DealPublishControl: []string{cfg.Wallets.PublishStorageDeals}, })), + Override(new(smtypes.AskGetter), storedask.NewStoredAsk(cfg)), + // Lotus Markets Override(new(dtypes.ProviderTransferNetwork), modules.NewProviderTransferNetwork), - Override(new(*modules.ProxyAskGetter), modules.NewAskGetter), - Override(new(server.AskGetter), From(new(*modules.ProxyAskGetter))), Override(new(*modules.LinkSystemProv), modules.NewLinkSystemProvider), Override(new(server.LinkSystemProvider), From(new(*modules.LinkSystemProv))), Override(new(*server.GraphsyncUnpaidRetrieval), modules.RetrievalGraphsync(cfg.LotusDealmaking.SimultaneousTransfersForStorage, cfg.LotusDealmaking.SimultaneousTransfersForStoragePerClient, cfg.LotusDealmaking.SimultaneousTransfersForRetrieval)), Override(new(dtypes.StagingGraphsync), From(new(*server.GraphsyncUnpaidRetrieval))), - 
Override(new(dtypes.ProviderPieceStore), modules.NewProviderPieceStore), Override(StartPieceDoctorKey, modules.NewPieceDoctor), // Lotus Markets (retrieval deps) Override(new(sealer.PieceProvider), sealer.NewPieceProvider), - Override(new(dtypes.RetrievalPricingFunc), modules.RetrievalPricingFunc(config.DealmakingConfig{ - RetrievalPricing: &lotus_config.RetrievalPricing{ - Strategy: config.RetrievalPricingDefaultMode, - Default: &lotus_config.RetrievalPricingDefault{}, - }, - })), - // DAG Store // TODO: Not sure how to completely get rid of these yet: @@ -570,22 +557,12 @@ func ConfigBoost(cfg *config.Boost) Option { Override(new(*bdclient.Store), modules.NewPieceDirectoryStore(cfg)), Override(new(*lib.MultiMinerAccessor), modules.NewMultiminerSectorAccessor(cfg)), Override(new(*piecedirectory.PieceDirectory), modules.NewPieceDirectory(cfg)), - Override(DAGStoreKey, modules.NewDAGStoreWrapper), Override(new(dagstore.Interface), From(new(*dagstore.DAGStore))), - Override(new(*modules.ShardSelector), modules.NewShardSelector), - Override(new(dtypes.IndexBackedBlockstore), modules.NewIndexBackedBlockstore(cfg)), - Override(HandleSetShardSelector, modules.SetShardSelectorFunc), - // Lotus Markets (retrieval) - Override(new(mdagstore.SectorAccessor), modules.NewSectorAccessor(cfg)), - Override(new(retrievalmarket.SectorAccessor), From(new(mdagstore.SectorAccessor))), - Override(new(retrievalmarket.RetrievalProviderNode), retrievaladapter.NewRetrievalProviderNode), - Override(new(rmnet.RetrievalMarketNetwork), modules.RetrievalNetwork), - Override(new(retrievalmarket.RetrievalProvider), modules.RetrievalProvider), - Override(HandleSetRetrievalAskGetter, modules.SetAskGetter), + Override(new(server.SectorAccessor), modules.NewSectorAccessor(cfg)), + Override(HandleSetRetrievalAskGetter, modules.NewRetrievalAskGetter), Override(HandleRetrievalEventsKey, modules.HandleRetrievalGraphsyncUpdates(time.Duration(cfg.Dealmaking.RetrievalLogDuration), time.Duration(cfg.Dealmaking.StalledRetrievalTimeout))), - Override(HandleRetrievalKey, modules.HandleRetrieval), Override(HandleRetrievalAskKey, modules.HandleQueryAsk), Override(new(*lp2pimpl.TransportsListener), modules.NewTransportsListener(cfg)), Override(new(*protocolproxy.ProtocolProxy), modules.NewProtocolProxy(cfg)), @@ -596,16 +573,11 @@ func ConfigBoost(cfg *config.Boost) Option { // Lotus Markets (storage) Override(new(dtypes.ProviderTransport), modules.NewProviderTransport), - Override(new(dtypes.ProviderDataTransfer), modules.NewProviderDataTransfer), - Override(new(*storedask.StoredAsk), modules.NewStorageAsk), - Override(new(gfm_storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter(&legacyFees, &cfg.LotusDealmaking)), - Override(new(gfm_storagemarket.StorageProvider), modules.NewLegacyStorageProvider(cfg)), - Override(HandleDealsKey, modules.HandleLegacyDeals), + Override(new(fsm.Group), modules.NewLegacyDealsFSM(cfg)), Override(HandleBoostDealsKey, modules.HandleBoostLibp2pDeals(cfg)), Override(HandleContractDealsKey, modules.HandleContractDeals(&cfg.ContractDeals)), Override(HandleProposalLogCleanerKey, modules.HandleProposalLogCleaner(time.Duration(cfg.Dealmaking.DealProposalLogDuration))), - Override(HandleSetLinkSystem, modules.SetLinkSystem), // Boost storage deal filter Override(new(dtypes.StorageDealFilter), modules.BasicDealFilter(nil)), @@ -613,26 +585,12 @@ func ConfigBoost(cfg *config.Boost) Option { Override(new(dtypes.StorageDealFilter), 
modules.BasicDealFilter(dtypes.StorageDealFilter(dealfilter.CliStorageDealFilter(cfg.Dealmaking.Filter)))), ), - // Lotus markets storage deal filter - Override(new(lotus_dtypes.StorageDealFilter), lotus_modules.BasicDealFilter(cfg.LotusDealmaking, nil)), - If(cfg.LotusDealmaking.Filter != "", - Override(new(lotus_dtypes.StorageDealFilter), lotus_modules.BasicDealFilter(cfg.LotusDealmaking, lotus_dealfilter.CliStorageDealFilter(cfg.LotusDealmaking.Filter))), - ), - Override(new(storageimpl.DealDeciderFunc), modules.DealDeciderFn), - // Boost retrieval deal filter Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(nil)), If(cfg.Dealmaking.RetrievalFilter != "", Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(dtypes.RetrievalDealFilter(dealfilter.CliRetrievalDealFilter(cfg.Dealmaking.RetrievalFilter)))), ), - // Lotus markets retrieval deal filter - Override(new(lotus_gfm_storagemarket.StorageProviderNode), modules.LotusGFMStorageProviderNode), - Override(new(lotus_dtypes.RetrievalDealFilter), lotus_modules.RetrievalDealFilter(nil)), - If(cfg.LotusDealmaking.RetrievalFilter != "", - Override(new(lotus_dtypes.RetrievalDealFilter), lotus_modules.RetrievalDealFilter(lotus_dealfilter.CliRetrievalDealFilter(cfg.LotusDealmaking.RetrievalFilter))), - ), - Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(&legacyFees, storageadapter.PublishMsgConfig{ Period: time.Duration(cfg.LotusDealmaking.PublishMsgPeriod), MaxDealsPerMsg: cfg.LotusDealmaking.MaxDealsPerPublishMsg, @@ -649,26 +607,6 @@ func ConfigBoost(cfg *config.Boost) Option { Override(new(sealer.StorageAuth), lotus_modules.StorageAuthWithURL(cfg.SectorIndexApiInfo)), Override(new(*backupmgr.BackupMgr), modules.NewOnlineBackupMgr(cfg)), - // Dynamic Lotus configs - Override(new(lotus_dtypes.ConsiderOnlineStorageDealsConfigFunc), lotus_modules.NewConsiderOnlineStorageDealsConfigFunc), - Override(new(lotus_dtypes.SetConsiderOnlineStorageDealsConfigFunc), lotus_modules.NewSetConsideringOnlineStorageDealsFunc), - Override(new(lotus_dtypes.ConsiderOnlineRetrievalDealsConfigFunc), lotus_modules.NewConsiderOnlineRetrievalDealsConfigFunc), - Override(new(lotus_dtypes.SetConsiderOnlineRetrievalDealsConfigFunc), lotus_modules.NewSetConsiderOnlineRetrievalDealsConfigFunc), - Override(new(lotus_dtypes.StorageDealPieceCidBlocklistConfigFunc), lotus_modules.NewStorageDealPieceCidBlocklistConfigFunc), - Override(new(lotus_dtypes.SetStorageDealPieceCidBlocklistConfigFunc), lotus_modules.NewSetStorageDealPieceCidBlocklistConfigFunc), - Override(new(lotus_dtypes.ConsiderOfflineStorageDealsConfigFunc), lotus_modules.NewConsiderOfflineStorageDealsConfigFunc), - Override(new(lotus_dtypes.SetConsiderOfflineStorageDealsConfigFunc), lotus_modules.NewSetConsideringOfflineStorageDealsFunc), - Override(new(lotus_dtypes.ConsiderOfflineRetrievalDealsConfigFunc), lotus_modules.NewConsiderOfflineRetrievalDealsConfigFunc), - Override(new(lotus_dtypes.SetConsiderOfflineRetrievalDealsConfigFunc), lotus_modules.NewSetConsiderOfflineRetrievalDealsConfigFunc), - Override(new(lotus_dtypes.ConsiderVerifiedStorageDealsConfigFunc), lotus_modules.NewConsiderVerifiedStorageDealsConfigFunc), - Override(new(lotus_dtypes.SetConsiderVerifiedStorageDealsConfigFunc), lotus_modules.NewSetConsideringVerifiedStorageDealsFunc), - Override(new(lotus_dtypes.ConsiderUnverifiedStorageDealsConfigFunc), lotus_modules.NewConsiderUnverifiedStorageDealsConfigFunc), - Override(new(lotus_dtypes.SetConsiderUnverifiedStorageDealsConfigFunc), 
lotus_modules.NewSetConsideringUnverifiedStorageDealsFunc), - Override(new(lotus_dtypes.SetExpectedSealDurationFunc), lotus_modules.NewSetExpectedSealDurationFunc), - Override(new(lotus_dtypes.GetExpectedSealDurationFunc), lotus_modules.NewGetExpectedSealDurationFunc), - Override(new(lotus_dtypes.SetMaxDealStartDelayFunc), lotus_modules.NewSetMaxDealStartDelayFunc), - Override(new(lotus_dtypes.GetMaxDealStartDelayFunc), lotus_modules.NewGetMaxDealStartDelayFunc), - // Dynamic Boost configs Override(new(dtypes.ConsiderOnlineStorageDealsConfigFunc), modules.NewConsiderOnlineStorageDealsConfigFunc), Override(new(dtypes.SetConsiderOnlineStorageDealsConfigFunc), modules.NewSetConsideringOnlineStorageDealsFunc), diff --git a/node/impl/boost.go b/node/impl/boost.go index ca85cbae3..dc03e6b39 100644 --- a/node/impl/boost.go +++ b/node/impl/boost.go @@ -4,36 +4,28 @@ import ( "context" "encoding/json" "errors" - "fmt" "io" "net/http" - "sort" + "github.com/filecoin-project/boost/lib/legacy" "github.com/filecoin-project/boost/node/impl/backupmgr" "github.com/filecoin-project/boost/piecedirectory" "github.com/multiformats/go-multihash" "go.opentelemetry.io/otel/attribute" - "github.com/filecoin-project/boost-gfm/retrievalmarket" - gfm_storagemarket "github.com/filecoin-project/boost-gfm/storagemarket" "github.com/filecoin-project/boost/api" "github.com/filecoin-project/boost/gql" "github.com/filecoin-project/boost/indexprovider" "github.com/filecoin-project/boost/markets/storageadapter" - "github.com/filecoin-project/boost/node/modules/dtypes" retmarket "github.com/filecoin-project/boost/retrievalmarket/server" "github.com/filecoin-project/boost/storagemarket" "github.com/filecoin-project/boost/storagemarket/sealingpipeline" "github.com/filecoin-project/boost/storagemarket/types" "github.com/filecoin-project/boostd-data/shared/tracing" - "github.com/filecoin-project/dagstore" - "github.com/filecoin-project/dagstore/shard" "github.com/filecoin-project/go-jsonrpc/auth" lapi "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/gateway" - mktsdagstore "github.com/filecoin-project/lotus/markets/dagstore" lotus_dtypes "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/storage/sectorblocks" "github.com/google/uuid" "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" @@ -54,23 +46,12 @@ type BoostAPI struct { Host host.Host - DAGStore *dagstore.DAGStore - DagStoreWrapper *mktsdagstore.Wrapper - IndexBackedBlockstore dtypes.IndexBackedBlockstore // Boost StorageProvider *storagemarket.Provider IndexProvider *indexprovider.Wrapper - // Legacy Lotus - LegacyStorageProvider gfm_storagemarket.StorageProvider - - // Lotus Markets - SectorBlocks *sectorblocks.SectorBlocks - PieceStore dtypes.ProviderPieceStore - DataTransfer dtypes.ProviderDataTransfer - - RetrievalProvider retrievalmarket.RetrievalProvider - SectorAccessor retrievalmarket.SectorAccessor + // Legacy Markets + LegacyDealManager legacy.LegacyDealManager DealPublisher *storageadapter.DealPublisher // Graphsync Unpaid Retrieval @@ -185,347 +166,6 @@ func (sm *BoostAPI) BoostOfflineDealWithData(ctx context.Context, dealUuid uuid. 
return res, err } -func (sm *BoostAPI) BoostDagstoreGC(ctx context.Context) ([]api.DagstoreShardResult, error) { - if sm.DAGStore == nil { - return nil, fmt.Errorf("dagstore not available on this node") - } - - res, err := sm.DAGStore.GC(ctx) - if err != nil { - return nil, fmt.Errorf("failed to gc: %w", err) - } - - ret := make([]api.DagstoreShardResult, 0, len(res.Shards)) - for k, err := range res.Shards { - r := api.DagstoreShardResult{Key: k.String()} - if err == nil { - r.Success = true - } else { - r.Success = false - r.Error = err.Error() - } - ret = append(ret, r) - } - - return ret, nil -} - -func (sm *BoostAPI) BoostDagstoreListShards(ctx context.Context) ([]api.DagstoreShardInfo, error) { - if sm.DAGStore == nil { - return nil, fmt.Errorf("dagstore not available on this node") - } - - info := sm.DAGStore.AllShardsInfo() - ret := make([]api.DagstoreShardInfo, 0, len(info)) - for k, i := range info { - ret = append(ret, api.DagstoreShardInfo{ - Key: k.String(), - State: i.ShardState.String(), - Error: func() string { - if i.Error == nil { - return "" - } - return i.Error.Error() - }(), - }) - } - - // order by key. - sort.SliceStable(ret, func(i, j int) bool { - return ret[i].Key < ret[j].Key - }) - - return ret, nil -} - -func (sm *BoostAPI) BoostDagstorePiecesContainingMultihash(ctx context.Context, mh multihash.Multihash) ([]cid.Cid, error) { - ctx, span := tracing.Tracer.Start(ctx, "Boost.BoostDagstorePiecesContainingMultihash") - span.SetAttributes(attribute.String("multihash", mh.String())) - defer span.End() - - if sm.DAGStore == nil { - return nil, fmt.Errorf("dagstore not available on this node") - } - - ks, err := sm.DAGStore.ShardsContainingMultihash(ctx, mh) - if err != nil { - return nil, fmt.Errorf("getting pieces containing multihash %s from DAG store: %w", mh, err) - } - - pieceCids := make([]cid.Cid, 0, len(ks)) - for _, k := range ks { - pieceCid, err := cid.Parse(k.String()) - if err != nil { - return nil, fmt.Errorf("parsing DAG store shard key '%s' into cid: %w", k, err) - } - pieceCids = append(pieceCids, pieceCid) - } - - return pieceCids, nil -} - -func (sm *BoostAPI) BoostDagstoreInitializeAll(ctx context.Context, params api.DagstoreInitializeAllParams) (<-chan api.DagstoreInitializeAllEvent, error) { - if sm.DAGStore == nil { - return nil, fmt.Errorf("dagstore not available on this node") - } - - if sm.SectorAccessor == nil { - return nil, fmt.Errorf("sector accessor not available on this node") - } - - // prepare the thottler tokens. - var throttle chan struct{} - if c := params.MaxConcurrency; c > 0 { - throttle = make(chan struct{}, c) - for i := 0; i < c; i++ { - throttle <- struct{}{} - } - } - - // are we initializing only unsealed pieces? - onlyUnsealed := !params.IncludeSealed - - info := sm.DAGStore.AllShardsInfo() - var toInitialize []string - for k, i := range info { - if i.ShardState != dagstore.ShardStateNew { - continue - } - - // if we're initializing only unsealed pieces, check if there's an - // unsealed deal for this piece available. 
- if onlyUnsealed { - pieceCid, err := cid.Decode(k.String()) - if err != nil { - log.Warnw("DagstoreInitializeAll: failed to decode shard key as piece CID; skipping", "shard_key", k.String(), "error", err) - continue - } - - pi, err := sm.PieceStore.GetPieceInfo(pieceCid) - if err != nil { - log.Warnw("DagstoreInitializeAll: failed to get piece info; skipping", "piece_cid", pieceCid, "error", err) - continue - } - - var isUnsealed bool - for _, d := range pi.Deals { - isUnsealed, err = sm.SectorAccessor.IsUnsealed(ctx, d.SectorID, d.Offset.Unpadded(), d.Length.Unpadded()) - if err != nil { - log.Warnw("DagstoreInitializeAll: failed to get unsealed status; skipping deal", "deal_id", d.DealID, "error", err) - continue - } - if isUnsealed { - break - } - } - - if !isUnsealed { - log.Infow("DagstoreInitializeAll: skipping piece because it's sealed", "piece_cid", pieceCid, "error", err) - continue - } - } - - // yes, we're initializing this shard. - toInitialize = append(toInitialize, k.String()) - } - - total := len(toInitialize) - if total == 0 { - out := make(chan api.DagstoreInitializeAllEvent) - close(out) - return out, nil - } - - // response channel must be closed when we're done, or the context is cancelled. - // this buffering is necessary to prevent inflight children goroutines from - // publishing to a closed channel (res) when the context is cancelled. - out := make(chan api.DagstoreInitializeAllEvent, 32) // internal buffer. - res := make(chan api.DagstoreInitializeAllEvent, 32) // returned to caller. - - // pump events back to caller. - // two events per shard. - go func() { - defer close(res) - - for i := 0; i < total*2; i++ { - select { - case res <- <-out: - case <-ctx.Done(): - return - } - } - }() - - go func() { - for i, k := range toInitialize { - if throttle != nil { - select { - case <-throttle: - // acquired a throttle token, proceed. 
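	// Illustration only, not part of this patch: the throttle used above is the
	// usual buffered-channel semaphore. A minimal sketch of the pattern, with
	// doWork as a hypothetical stand-in for the per-shard initialization:
	//
	//	throttle := make(chan struct{}, maxConcurrency)
	//	for i := 0; i < maxConcurrency; i++ {
	//		throttle <- struct{}{} // pre-fill with tokens
	//	}
	//	for _, key := range keys {
	//		<-throttle // block until a token is free
	//		go func(key string) {
	//			defer func() { throttle <- struct{}{} }() // return the token when done
	//			doWork(key)
	//		}(key)
	//	}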
- case <-ctx.Done(): - return - } - } - - go func(k string, i int) { - r := api.DagstoreInitializeAllEvent{ - Key: k, - Event: "start", - Total: total, - Current: i + 1, // start with 1 - } - select { - case out <- r: - case <-ctx.Done(): - return - } - - err := sm.BoostDagstoreInitializeShard(ctx, k) - - if throttle != nil { - throttle <- struct{}{} - } - - r.Event = "end" - if err == nil { - r.Success = true - } else { - r.Success = false - r.Error = err.Error() - } - - select { - case out <- r: - case <-ctx.Done(): - } - }(k, i) - } - }() - - return res, nil -} - -func (sm *BoostAPI) BoostDagstoreInitializeShard(ctx context.Context, key string) error { - if sm.DAGStore == nil { - return fmt.Errorf("dagstore not available on this node") - } - - k := shard.KeyFromString(key) - - info, err := sm.DAGStore.GetShardInfo(k) - if err != nil { - return fmt.Errorf("failed to get shard info: %w", err) - } - if st := info.ShardState; st != dagstore.ShardStateNew { - return fmt.Errorf("cannot initialize shard; expected state ShardStateNew, was: %s", st.String()) - } - - ch := make(chan dagstore.ShardResult, 1) - if err = sm.DAGStore.AcquireShard(ctx, k, ch, dagstore.AcquireOpts{}); err != nil { - return fmt.Errorf("failed to acquire shard: %w", err) - } - - var res dagstore.ShardResult - select { - case res = <-ch: - case <-ctx.Done(): - return ctx.Err() - } - - if err := res.Error; err != nil { - return fmt.Errorf("failed to acquire shard: %w", err) - } - - if res.Accessor != nil { - err = res.Accessor.Close() - if err != nil { - log.Warnw("failed to close shard accessor; continuing", "shard_key", k, "error", err) - } - } - - return nil -} - -func (sm *BoostAPI) BoostDagstoreRegisterShard(ctx context.Context, key string) error { - if sm.DAGStore == nil { - return fmt.Errorf("dagstore not available on this node") - } - - // First check if the shard has already been registered - k := shard.KeyFromString(key) - _, err := sm.DAGStore.GetShardInfo(k) - if err == nil { - // Shard already registered, nothing further to do - return nil - } - // If the shard is not registered we would expect ErrShardUnknown - if !errors.Is(err, dagstore.ErrShardUnknown) { - return fmt.Errorf("getting shard info from DAG store: %w", err) - } - - pieceCid, err := cid.Parse(key) - if err != nil { - return fmt.Errorf("parsing shard key as piece cid: %w", err) - } - if err = registerShardSync(ctx, sm.DagStoreWrapper, pieceCid, "", true); err != nil { - return fmt.Errorf("failed to register shard: %w", err) - } - - return nil -} - -func (sm *BoostAPI) BoostDagstoreRecoverShard(ctx context.Context, key string) error { - if sm.DAGStore == nil { - return fmt.Errorf("dagstore not available on this node") - } - - k := shard.KeyFromString(key) - - info, err := sm.DAGStore.GetShardInfo(k) - if err != nil { - return fmt.Errorf("failed to get shard info: %w", err) - } - if st := info.ShardState; st != dagstore.ShardStateErrored { - return fmt.Errorf("cannot recover shard; expected state ShardStateErrored, was: %s", st.String()) - } - - ch := make(chan dagstore.ShardResult, 1) - if err = sm.DAGStore.RecoverShard(ctx, k, ch, dagstore.RecoverOpts{}); err != nil { - return fmt.Errorf("failed to recover shard: %w", err) - } - - var res dagstore.ShardResult - select { - case res = <-ch: - case <-ctx.Done(): - return ctx.Err() - } - - return res.Error -} - -func (sm *BoostAPI) BoostDagstoreDestroyShard(ctx context.Context, key string) error { - if sm.DAGStore == nil { - return fmt.Errorf("dagstore not available on this node") - } - - // First 
check if the shard has already been registered - k := shard.KeyFromString(key) - _, err := sm.DAGStore.GetShardInfo(k) - if err != nil { - return fmt.Errorf("unable to query dagstore for shard info: %w", err) - } - - pieceCid, err := cid.Parse(key) - if err != nil { - return fmt.Errorf("parsing shard key as piece cid: %w", err) - } - if err = destroyShardSync(ctx, sm.DagStoreWrapper, pieceCid); err != nil { - return fmt.Errorf("failed to destroy shard: %w", err) - } - return nil -} - func (sm *BoostAPI) BoostMakeDeal(ctx context.Context, params types.DealParams) (*api.ProviderDealRejectionInfo, error) { log.Infow("received json-rpc deal proposal", "id", params.DealUUID) return sm.StorageProvider.ExecuteDeal(ctx, ¶ms, "json-rpc-deal") @@ -554,32 +194,3 @@ func (sm *BoostAPI) PdBuildIndexForPieceCid(ctx context.Context, piececid cid.Ci func (sm *BoostAPI) OnlineBackup(ctx context.Context, dstDir string) error { return sm.Bkp.Backup(ctx, dstDir) } - -func registerShardSync(ctx context.Context, ds *mktsdagstore.Wrapper, pieceCid cid.Cid, carPath string, eagerInit bool) error { - resch := make(chan dagstore.ShardResult, 1) - if err := ds.RegisterShard(ctx, pieceCid, carPath, eagerInit, resch); err != nil { - return err - } - - select { - case <-ctx.Done(): - return ctx.Err() - case res := <-resch: - return res.Error - } -} - -func destroyShardSync(ctx context.Context, ds *mktsdagstore.Wrapper, pieceCid cid.Cid) error { - resch := make(chan dagstore.ShardResult, 1) - - if err := ds.DestroyShard(ctx, pieceCid, resch); err != nil { - return err - } - - select { - case <-ctx.Done(): - return ctx.Err() - case res := <-resch: - return res.Error - } -} diff --git a/node/impl/boost_legacy.go b/node/impl/boost_legacy.go deleted file mode 100644 index 5e4c4ce1b..000000000 --- a/node/impl/boost_legacy.go +++ /dev/null @@ -1,239 +0,0 @@ -package impl - -import ( - "context" - "fmt" - "os" - "strconv" - "time" - - "github.com/filecoin-project/boost-gfm/retrievalmarket" - "github.com/filecoin-project/boost-gfm/storagemarket" - "github.com/filecoin-project/boost/api" - "github.com/filecoin-project/go-address" - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-state-types/abi" - lapi "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/types" - "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p/core/peer" -) - -func (sm *BoostAPI) MarketListDataTransfers(ctx context.Context) ([]api.DataTransferChannel, error) { - inProgressChannels, err := sm.DataTransfer.InProgressChannels(ctx) - if err != nil { - return nil, err - } - - unpaidRetrievals := sm.GraphsyncUnpaidRetrieval.List() - - // Get legacy, paid retrievals - apiChannels := make([]api.DataTransferChannel, 0, len(inProgressChannels)+len(unpaidRetrievals)) - for _, channelState := range inProgressChannels { - apiChannels = append(apiChannels, api.NewDataTransferChannel(sm.Host.ID(), channelState)) - } - - // Include unpaid retrievals - for _, ur := range unpaidRetrievals { - apiChannels = append(apiChannels, api.NewDataTransferChannel(sm.Host.ID(), ur.ChannelState())) - } - - return apiChannels, nil -} - -func (sm *BoostAPI) MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error { - selfPeer := sm.Host.ID() - if isInitiator { - return sm.DataTransfer.RestartDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: selfPeer, Responder: otherPeer, ID: transferID}) - } - return 
sm.DataTransfer.RestartDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: otherPeer, Responder: selfPeer, ID: transferID}) -} - -func (sm *BoostAPI) MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error { - selfPeer := sm.Host.ID() - - // Attempt to cancel unpaid first, if that succeeds, we're done - err := sm.GraphsyncUnpaidRetrieval.CancelTransfer(ctx, transferID, &otherPeer) - if err == nil { - return nil - } - - // Legacy, paid retrievals - if isInitiator { - return sm.DataTransfer.CloseDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: selfPeer, Responder: otherPeer, ID: transferID}) - } - return sm.DataTransfer.CloseDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: otherPeer, Responder: selfPeer, ID: transferID}) -} - -func (sm *BoostAPI) MarketDataTransferUpdates(ctx context.Context) (<-chan api.DataTransferChannel, error) { - channels := make(chan api.DataTransferChannel) - - unsub := sm.DataTransfer.SubscribeToEvents(func(evt datatransfer.Event, channelState datatransfer.ChannelState) { - channel := api.NewDataTransferChannel(sm.Host.ID(), channelState) - select { - case <-ctx.Done(): - case channels <- channel: - } - }) - - go func() { - defer unsub() - <-ctx.Done() - }() - - return channels, nil -} - -func (sm *BoostAPI) MarketListRetrievalDeals(ctx context.Context) ([]retrievalmarket.ProviderDealState, error) { - deals := sm.RetrievalProvider.ListDeals() - unpaidRetrievals := sm.GraphsyncUnpaidRetrieval.List() - - out := make([]retrievalmarket.ProviderDealState, 0, len(deals)+len(unpaidRetrievals)) - - for _, deal := range deals { - if deal.ChannelID != nil { - if deal.ChannelID.Initiator == "" || deal.ChannelID.Responder == "" { - deal.ChannelID = nil // don't try to push unparsable peer IDs over jsonrpc - } - } - out = append(out, deal) - } - - for _, ur := range unpaidRetrievals { - out = append(out, ur.ProviderDealState()) - } - - return out, nil -} - -func (sm *BoostAPI) MarketImportDealData(ctx context.Context, propCid cid.Cid, path string) error { - fi, err := os.Open(path) - if err != nil { - return fmt.Errorf("failed to open file: %w", err) - } - defer fi.Close() //nolint:errcheck - - return sm.LegacyStorageProvider.ImportDataForDeal(ctx, propCid, fi) -} - -func (sm *BoostAPI) MarketSetRetrievalAsk(ctx context.Context, rask *retrievalmarket.Ask) error { - sm.RetrievalProvider.SetAsk(rask) - return nil -} - -func (sm *BoostAPI) MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error) { - return sm.RetrievalProvider.GetAsk(), nil -} - -func (sm *BoostAPI) DealsConsiderOnlineStorageDeals(ctx context.Context) (bool, error) { - return sm.ConsiderOnlineStorageDealsConfigFunc() -} - -func (sm *BoostAPI) DealsSetConsiderOnlineStorageDeals(ctx context.Context, b bool) error { - return sm.SetConsiderOnlineStorageDealsConfigFunc(b) -} - -func (sm *BoostAPI) DealsConsiderOnlineRetrievalDeals(ctx context.Context) (bool, error) { - return sm.ConsiderOnlineRetrievalDealsConfigFunc() -} - -func (sm *BoostAPI) DealsSetConsiderOnlineRetrievalDeals(ctx context.Context, b bool) error { - return sm.SetConsiderOnlineRetrievalDealsConfigFunc(b) -} - -func (sm *BoostAPI) DealsConsiderOfflineStorageDeals(ctx context.Context) (bool, error) { - return sm.ConsiderOfflineStorageDealsConfigFunc() -} - -func (sm *BoostAPI) DealsSetConsiderOfflineStorageDeals(ctx context.Context, b bool) error { - return sm.SetConsiderOfflineStorageDealsConfigFunc(b) -} - -func (sm *BoostAPI) 
DealsConsiderOfflineRetrievalDeals(ctx context.Context) (bool, error) { - return sm.ConsiderOfflineRetrievalDealsConfigFunc() -} - -func (sm *BoostAPI) DealsSetConsiderOfflineRetrievalDeals(ctx context.Context, b bool) error { - return sm.SetConsiderOfflineRetrievalDealsConfigFunc(b) -} - -func (sm *BoostAPI) DealsConsiderVerifiedStorageDeals(ctx context.Context) (bool, error) { - return sm.ConsiderVerifiedStorageDealsConfigFunc() -} - -func (sm *BoostAPI) DealsSetConsiderVerifiedStorageDeals(ctx context.Context, b bool) error { - return sm.SetConsiderVerifiedStorageDealsConfigFunc(b) -} - -func (sm *BoostAPI) DealsConsiderUnverifiedStorageDeals(ctx context.Context) (bool, error) { - return sm.ConsiderUnverifiedStorageDealsConfigFunc() -} - -func (sm *BoostAPI) DealsSetConsiderUnverifiedStorageDeals(ctx context.Context, b bool) error { - return sm.SetConsiderUnverifiedStorageDealsConfigFunc(b) -} - -func (sm *BoostAPI) DealsGetExpectedSealDurationFunc(ctx context.Context) (time.Duration, error) { - return sm.GetExpectedSealDurationFunc() -} - -func (sm *BoostAPI) DealsSetExpectedSealDurationFunc(ctx context.Context, d time.Duration) error { - return sm.SetExpectedSealDurationFunc(d) -} - -func (sm *BoostAPI) DealsPieceCidBlocklist(ctx context.Context) ([]cid.Cid, error) { - return sm.StorageDealPieceCidBlocklistConfigFunc() -} - -func (sm *BoostAPI) DealsSetPieceCidBlocklist(ctx context.Context, cids []cid.Cid) error { - return sm.SetStorageDealPieceCidBlocklistConfigFunc(cids) -} - -func (sm *BoostAPI) MarketSetAsk(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error { - options := []storagemarket.StorageAskOption{ - storagemarket.MinPieceSize(minPieceSize), - storagemarket.MaxPieceSize(maxPieceSize), - } - - return sm.LegacyStorageProvider.SetAsk(price, verifiedPrice, duration, options...) 
-} - -func (sm *BoostAPI) MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) { - return sm.LegacyStorageProvider.ListLocalDeals() -} - -func (sm *BoostAPI) MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error) { - return sm.LegacyStorageProvider.GetAsk(), nil -} - -func (sm *BoostAPI) ActorSectorSize(ctx context.Context, addr address.Address) (abi.SectorSize, error) { - mi, err := sm.Full.StateMinerInfo(ctx, addr, types.EmptyTSK) - if err != nil { - return 0, err - } - return mi.SectorSize, nil -} - -func (sm *BoostAPI) RuntimeSubsystems(context.Context) (res lapi.MinerSubsystems, err error) { - return []lapi.MinerSubsystem{lapi.SubsystemMarkets}, nil -} - -func (sm *BoostAPI) MarketPendingDeals(ctx context.Context) (lapi.PendingDealInfo, error) { - return sm.DealPublisher.PendingDeals(), nil -} - -func (sm *BoostAPI) SectorsRefs(ctx context.Context) (map[string][]lapi.SealedRef, error) { - // json can't handle cids as map keys - out := map[string][]lapi.SealedRef{} - - refs, err := sm.SectorBlocks.List(ctx) - if err != nil { - return nil, err - } - - for k, v := range refs { - out[strconv.FormatUint(k, 10)] = v - } - - return out, nil -} diff --git a/node/modules/client.go b/node/modules/client.go deleted file mode 100644 index 08751f981..000000000 --- a/node/modules/client.go +++ /dev/null @@ -1,176 +0,0 @@ -package modules - -import ( - "bytes" - "context" - "errors" - "fmt" - "os" - "path/filepath" - "time" - - "go.uber.org/fx" - - "github.com/filecoin-project/go-state-types/abi" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" - "github.com/libp2p/go-libp2p/core/host" - - "github.com/filecoin-project/boost-gfm/discovery" - discoveryimpl "github.com/filecoin-project/boost-gfm/discovery/impl" - "github.com/filecoin-project/boost-gfm/retrievalmarket" - retrievalimpl "github.com/filecoin-project/boost-gfm/retrievalmarket/impl" - rmnet "github.com/filecoin-project/boost-gfm/retrievalmarket/network" - "github.com/filecoin-project/boost-gfm/storagemarket" - storageimpl "github.com/filecoin-project/boost-gfm/storagemarket/impl" - "github.com/filecoin-project/boost-gfm/storagemarket/impl/requestvalidation" - smnet "github.com/filecoin-project/boost-gfm/storagemarket/network" - "github.com/filecoin-project/boost/markets" - marketevents "github.com/filecoin-project/boost/markets/loggers" - "github.com/filecoin-project/boost/markets/retrievaladapter" - "github.com/filecoin-project/boost/markets/storageadapter" - "github.com/filecoin-project/boost/node/modules/dtypes" - "github.com/filecoin-project/lotus/blockstore" - "github.com/filecoin-project/lotus/chain/market" - "github.com/filecoin-project/lotus/journal" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/node/impl/full" - payapi "github.com/filecoin-project/lotus/node/impl/paych" - lotus_dtypes "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/node/repo" - "github.com/filecoin-project/lotus/node/repo/imports" -) - -func HandleMigrateClientFunds(lc fx.Lifecycle, ds lotus_dtypes.MetadataDS, wallet full.WalletAPI, fundMgr *market.FundManager) { - lc.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { - addr, err := wallet.WalletDefaultAddress(ctx) - // nothing to be done if there is no default address - if err != nil { - return nil - } - b, err := ds.Get(ctx, datastore.NewKey("/marketfunds/client")) - if err != nil { - if errors.Is(err, datastore.ErrNotFound) { - return nil - } - 
log.Errorf("client funds migration - getting datastore value: %v", err) - return nil - } - - var value abi.TokenAmount - if err = value.UnmarshalCBOR(bytes.NewReader(b)); err != nil { - log.Errorf("client funds migration - unmarshalling datastore value: %v", err) - return nil - } - _, err = fundMgr.Reserve(ctx, addr, addr, value) - if err != nil { - log.Errorf("client funds migration - reserving funds (wallet %s, addr %s, funds %d): %v", - addr, addr, value, err) - return nil - } - - return ds.Delete(ctx, datastore.NewKey("/marketfunds/client")) - }, - }) -} - -func ClientImportMgr(ds lotus_dtypes.MetadataDS, r repo.LockedRepo) (lotus_dtypes.ClientImportMgr, error) { - // store the imports under the repo's `imports` subdirectory. - dir := filepath.Join(r.Path(), "imports") - if err := os.MkdirAll(dir, 0755); err != nil { - return nil, fmt.Errorf("failed to create directory %s: %w", dir, err) - } - - ns := namespace.Wrap(ds, datastore.NewKey("/client")) - return imports.NewManager(ns, dir), nil -} - -// TODO this should be removed. -func ClientBlockstore() dtypes.ClientBlockstore { - // in most cases this is now unused in normal operations -- however, it's important to preserve for the IPFS use case - return blockstore.WrapIDStore(blockstore.FromDatastore(datastore.NewMapDatastore())) -} - -// RegisterClientValidator is an initialization hook that registers the client -// request validator with the data transfer module as the validator for -// StorageDataTransferVoucher types -func RegisterClientValidator(crv dtypes.ClientRequestValidator, dtm dtypes.ClientDataTransfer) { - if err := dtm.RegisterVoucherType(&requestvalidation.StorageDataTransferVoucher{}, (*requestvalidation.UnifiedRequestValidator)(crv)); err != nil { - panic(err) - } -} - -// NewClientDatastore creates a datastore for the client to store its deals -func NewClientDatastore(ds lotus_dtypes.MetadataDS) dtypes.ClientDatastore { - return namespace.Wrap(ds, datastore.NewKey("/deals/client")) -} - -// StorageBlockstoreAccessor returns the default storage blockstore accessor -// from the import manager. -func StorageBlockstoreAccessor(importmgr dtypes.ClientImportMgr) storagemarket.BlockstoreAccessor { - return storageadapter.NewImportsBlockstoreAccessor(importmgr) -} - -// RetrievalBlockstoreAccessor returns the default retrieval blockstore accessor -// using the subdirectory `retrievals` under the repo. 
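// Illustration only, not part of this patch: HandleMigrateClientFunds above follows a
// one-shot migration pattern: read a legacy datastore key, apply its value, then delete
// the key so the migration never runs again. A minimal sketch of that shape, where
// migrate is a hypothetical callback supplied by the caller:
func migrateOnce(ctx context.Context, ds datastore.Datastore, key datastore.Key, migrate func([]byte) error) error {
	b, err := ds.Get(ctx, key)
	if errors.Is(err, datastore.ErrNotFound) {
		// Nothing stored under the key, so there is nothing to migrate.
		return nil
	}
	if err != nil {
		return err
	}
	if err := migrate(b); err != nil {
		return err
	}
	// Delete the key only after the value has been applied successfully.
	return ds.Delete(ctx, key)
}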
-func RetrievalBlockstoreAccessor(r repo.LockedRepo) (retrievalmarket.BlockstoreAccessor, error) { - dir := filepath.Join(r.Path(), "retrievals") - if err := os.MkdirAll(dir, 0755); err != nil { - return nil, fmt.Errorf("failed to create directory %s: %w", dir, err) - } - return retrievaladapter.NewCARBlockstoreAccessor(dir), nil -} - -func StorageClient(lc fx.Lifecycle, h host.Host, dataTransfer dtypes.ClientDataTransfer, discovery *discoveryimpl.Local, - deals dtypes.ClientDatastore, scn storagemarket.StorageClientNode, accessor storagemarket.BlockstoreAccessor, j journal.Journal) (storagemarket.StorageClient, error) { - // go-fil-markets protocol retries: - // 1s, 5s, 25s, 2m5s, 5m x 11 ~= 1 hour - marketsRetryParams := smnet.RetryParameters(time.Second, 5*time.Minute, 15, 5) - net := smnet.NewFromLibp2pHost(h, marketsRetryParams) - - c, err := storageimpl.NewClient(net, dataTransfer, discovery, deals, scn, accessor, storageimpl.DealPollingInterval(time.Second), storageimpl.MaxTraversalLinks(config.MaxTraversalLinks)) - if err != nil { - return nil, err - } - c.OnReady(marketevents.ReadyLogger("storage client")) - lc.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { - c.SubscribeToEvents(marketevents.StorageClientLogger) - - evtType := j.RegisterEventType("markets/storage/client", "state_change") - c.SubscribeToEvents(markets.StorageClientJournaler(j, evtType)) - - return c.Start(ctx) - }, - OnStop: func(context.Context) error { - return c.Stop() - }, - }) - return c, nil -} - -// RetrievalClient creates a new retrieval client attached to the client blockstore -func RetrievalClient(lc fx.Lifecycle, h host.Host, r repo.LockedRepo, dt dtypes.ClientDataTransfer, payAPI payapi.PaychAPI, resolver discovery.PeerResolver, - ds lotus_dtypes.MetadataDS, chainAPI full.ChainAPI, stateAPI full.StateAPI, accessor retrievalmarket.BlockstoreAccessor, j journal.Journal) (retrievalmarket.RetrievalClient, error) { - - adapter := retrievaladapter.NewRetrievalClientNode(false, payAPI, chainAPI, stateAPI) - network := rmnet.NewFromLibp2pHost(h) - ds = namespace.Wrap(ds, datastore.NewKey("/retrievals/client")) - client, err := retrievalimpl.NewClient(network, dt, adapter, resolver, ds, accessor) - if err != nil { - return nil, err - } - client.OnReady(marketevents.ReadyLogger("retrieval client")) - lc.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { - client.SubscribeToEvents(marketevents.RetrievalClientLogger) - - evtType := j.RegisterEventType("markets/retrieval/client", "state_change") - client.SubscribeToEvents(markets.RetrievalClientJournaler(j, evtType)) - - return client.Start(ctx) - }, - }) - return client, nil -} diff --git a/node/modules/dealfilter.go b/node/modules/dealfilter.go index d916d7989..36eb3d4ef 100644 --- a/node/modules/dealfilter.go +++ b/node/modules/dealfilter.go @@ -5,8 +5,8 @@ import ( "fmt" "time" - "github.com/filecoin-project/boost-gfm/retrievalmarket" "github.com/filecoin-project/boost/node/modules/dtypes" + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" "github.com/filecoin-project/boost/storagemarket/dealfilter" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api/v1api" @@ -140,7 +140,7 @@ func RetrievalDealFilter(userFilter dtypes.RetrievalDealFilter) func(onlineOk dt offlineOk dtypes.ConsiderOfflineRetrievalDealsConfigFunc) dtypes.RetrievalDealFilter { return func(onlineOk dtypes.ConsiderOnlineRetrievalDealsConfigFunc, offlineOk dtypes.ConsiderOfflineRetrievalDealsConfigFunc) 
dtypes.RetrievalDealFilter { - return func(ctx context.Context, state retrievalmarket.ProviderDealState) (bool, string, error) { + return func(ctx context.Context, state legacyretrievaltypes.ProviderDealState) (bool, string, error) { b, err := onlineOk() if err != nil { return false, "miner error", err diff --git a/node/modules/dtypes/miner.go b/node/modules/dtypes/miner.go index 99ed3e6a0..248093888 100644 --- a/node/modules/dtypes/miner.go +++ b/node/modules/dtypes/miner.go @@ -2,10 +2,11 @@ package dtypes import ( "context" - "github.com/ipfs/go-cid" "time" - "github.com/filecoin-project/boost-gfm/retrievalmarket" + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" + "github.com/ipfs/go-cid" + "github.com/filecoin-project/boost/storagemarket/dealfilter" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/storage/pipeline/sealiface" @@ -92,4 +93,4 @@ type GetMaxDealStartDelayFunc func() (time.Duration, error) type StorageDealFilter dealfilter.StorageDealFilter type RetrievalDealFilter dealfilter.RetrievalDealFilter -type RetrievalPricingFunc func(ctx context.Context, dealPricingParams retrievalmarket.PricingInput) (retrievalmarket.Ask, error) +type RetrievalPricingFunc func(ctx context.Context, dealPricingParams legacyretrievaltypes.PricingInput) (legacyretrievaltypes.Ask, error) diff --git a/node/modules/dtypes/storage.go b/node/modules/dtypes/storage.go index 901b76197..b117621af 100644 --- a/node/modules/dtypes/storage.go +++ b/node/modules/dtypes/storage.go @@ -2,15 +2,13 @@ package dtypes import ( graphsync "github.com/filecoin-project/boost-graphsync" - datatransfer "github.com/filecoin-project/go-data-transfer" - dtnet "github.com/filecoin-project/go-data-transfer/network" + "github.com/filecoin-project/boost/datatransfer" + dtnet "github.com/filecoin-project/boost/datatransfer/network" "github.com/filecoin-project/go-statestore" bserv "github.com/ipfs/boxo/blockservice" - exchange "github.com/ipfs/boxo/exchange" + "github.com/ipfs/boxo/exchange" "github.com/ipfs/go-datastore" - "github.com/filecoin-project/boost-gfm/piecestore" - "github.com/filecoin-project/boost-gfm/storagemarket/impl/requestvalidation" ipfsblockstore "github.com/ipfs/boxo/blockstore" "github.com/filecoin-project/lotus/blockstore" @@ -72,21 +70,14 @@ type ChainBlockService bserv.BlockService type ClientImportMgr *imports.Manager type ClientBlockstore blockstore.BasicBlockstore type ClientDealStore *statestore.StateStore -type ClientRequestValidator *requestvalidation.UnifiedRequestValidator type ClientDatastore datastore.Batching type Graphsync graphsync.GraphExchange -// ClientDataTransfer is a data transfer manager for the client -type ClientDataTransfer datatransfer.Manager -type ProviderDataTransfer datatransfer.Manager type ProviderTransferNetwork dtnet.DataTransferNetwork type ProviderTransport datatransfer.Transport type ProviderDealStore *statestore.StateStore -type ProviderPieceStore piecestore.PieceStore - -type ProviderRequestValidator *requestvalidation.UnifiedRequestValidator type StagingBlockstore blockstore.BasicBlockstore type StagingGraphsync graphsync.GraphExchange diff --git a/node/modules/graphsync.go b/node/modules/graphsync.go index 6f9e76b0f..529e3f563 100644 --- a/node/modules/graphsync.go +++ b/node/modules/graphsync.go @@ -5,8 +5,6 @@ import ( "sync" "time" - "github.com/filecoin-project/boost-gfm/retrievalmarket" - retrievalimpl "github.com/filecoin-project/boost-gfm/retrievalmarket/impl" graphsync 
"github.com/filecoin-project/boost-graphsync/impl" gsnet "github.com/filecoin-project/boost-graphsync/network" "github.com/filecoin-project/boost-graphsync/storeutil" @@ -16,45 +14,33 @@ import ( "github.com/filecoin-project/boost/node/modules/dtypes" "github.com/filecoin-project/boost/piecedirectory" "github.com/filecoin-project/boost/retrievalmarket/server" + retrievalimpl "github.com/filecoin-project/boost/retrievalmarket/server" + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/metrics" lotus_helpers "github.com/filecoin-project/lotus/node/modules/helpers" "github.com/ipfs/kubo/core/node/helpers" "github.com/ipld/go-ipld-prime" - provider "github.com/ipni/index-provider" - "github.com/ipni/index-provider/engine" "github.com/libp2p/go-libp2p/core/host" "go.opencensus.io/stats" "go.uber.org/fx" ) -var _ server.AskGetter = (*ProxyAskGetter)(nil) +type RetrievalAskGetter struct { + ask legacyretrievaltypes.Ask +} -// ProxyAskGetter is used to avoid circular dependencies: -// RetrievalProvider depends on RetrievalGraphsync, which depends on RetrievalProvider's -// GetAsk method. -// We create an AskGetter that returns zero-priced asks by default. -// Then we set the AskGetter to the RetrievalProvider after it's been created. -type ProxyAskGetter struct { - server.AskGetter +func (rag *RetrievalAskGetter) GetAsk() *legacyretrievaltypes.Ask { + return &rag.ask } -func (ag *ProxyAskGetter) GetAsk() *retrievalmarket.Ask { - if ag.AskGetter == nil { - return &retrievalmarket.Ask{ +func NewRetrievalAskGetter() *RetrievalAskGetter { + return &RetrievalAskGetter{ + ask: legacyretrievaltypes.Ask{ PricePerByte: abi.NewTokenAmount(0), UnsealPrice: abi.NewTokenAmount(0), - } + }, } - return ag.AskGetter.GetAsk() -} - -func NewAskGetter() *ProxyAskGetter { - return &ProxyAskGetter{} -} - -func SetAskGetter(proxy *ProxyAskGetter, rp retrievalmarket.RetrievalProvider) { - proxy.AskGetter = rp } // LinkSystemProv is used to avoid circular dependencies @@ -70,16 +56,9 @@ func (p *LinkSystemProv) LinkSys() *ipld.LinkSystem { return p.LinkSystem } -func SetLinkSystem(proxy *LinkSystemProv, prov provider.Interface) { - e, ok := prov.(*engine.Engine) - if ok { - proxy.LinkSystem = e.LinkSystem() - } -} - // RetrievalGraphsync creates a graphsync instance used to serve retrievals. 
-func RetrievalGraphsync(parallelTransfersForStorage uint64, parallelTransfersForStoragePerPeer uint64, parallelTransfersForRetrieval uint64) func(mctx lotus_helpers.MetricsCtx, lc fx.Lifecycle, pid *piecedirectory.PieceDirectory, h host.Host, net dtypes.ProviderTransferNetwork, dealDecider dtypes.RetrievalDealFilter, pstore dtypes.ProviderPieceStore, sa *lib.MultiMinerAccessor, askGetter server.AskGetter, ls server.LinkSystemProvider) (*server.GraphsyncUnpaidRetrieval, error) { - return func(mctx lotus_helpers.MetricsCtx, lc fx.Lifecycle, pid *piecedirectory.PieceDirectory, h host.Host, net dtypes.ProviderTransferNetwork, dealDecider dtypes.RetrievalDealFilter, pstore dtypes.ProviderPieceStore, sa *lib.MultiMinerAccessor, askGetter server.AskGetter, ls server.LinkSystemProvider) (*server.GraphsyncUnpaidRetrieval, error) { +func RetrievalGraphsync(parallelTransfersForStorage uint64, parallelTransfersForStoragePerPeer uint64, parallelTransfersForRetrieval uint64) func(mctx lotus_helpers.MetricsCtx, lc fx.Lifecycle, pid *piecedirectory.PieceDirectory, h host.Host, net dtypes.ProviderTransferNetwork, dealDecider dtypes.RetrievalDealFilter, sa *lib.MultiMinerAccessor, askGetter *RetrievalAskGetter) (*server.GraphsyncUnpaidRetrieval, error) { + return func(mctx lotus_helpers.MetricsCtx, lc fx.Lifecycle, pid *piecedirectory.PieceDirectory, h host.Host, net dtypes.ProviderTransferNetwork, dealDecider dtypes.RetrievalDealFilter, sa *lib.MultiMinerAccessor, askGetter *RetrievalAskGetter) (*server.GraphsyncUnpaidRetrieval, error) { // Graphsync tracks metrics separately, pass nil blockMetrics to the remote blockstore rb := remoteblockstore.NewRemoteBlockstore(pid, nil) @@ -94,7 +73,7 @@ func RetrievalGraphsync(parallelTransfersForStorage uint64, parallelTransfersFor SectorAccessor: sa, AskStore: askGetter, } - gsupr, err := server.NewGraphsyncUnpaidRetrieval(h.ID(), gs, net, vdeps, ls) + gsupr, err := server.NewGraphsyncUnpaidRetrieval(h.ID(), gs, net, vdeps) if err != nil { return nil, err } diff --git a/node/modules/legacy_markets.go b/node/modules/legacy_markets.go deleted file mode 100644 index 9cffb5bf0..000000000 --- a/node/modules/legacy_markets.go +++ /dev/null @@ -1,127 +0,0 @@ -package modules - -import ( - "context" - "fmt" - "os" - "path/filepath" - - piecefilestore "github.com/filecoin-project/boost-gfm/filestore" - "github.com/filecoin-project/boost-gfm/storagemarket" - storageimpl "github.com/filecoin-project/boost-gfm/storagemarket/impl" - "github.com/filecoin-project/boost-gfm/storagemarket/impl/storedask" - smnet "github.com/filecoin-project/boost-gfm/storagemarket/network" - "github.com/filecoin-project/boost-gfm/stores" - "github.com/filecoin-project/boost/markets/idxprov" - "github.com/filecoin-project/boost/node/config" - "github.com/filecoin-project/boost/node/modules/dtypes" - "github.com/filecoin-project/go-address" - datatransferv2 "github.com/filecoin-project/go-data-transfer/v2" - lotus_gfm_filestore "github.com/filecoin-project/go-fil-markets/filestore" - lotus_gfm_storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket" - lotus_modules "github.com/filecoin-project/lotus/node/modules" - lotus_dtypes "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/node/repo" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" - provider "github.com/ipni/index-provider" - "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/protocol" -) - -func StorageProvider(minerAddress 
lotus_dtypes.MinerAddress, - storedAsk *storedask.StoredAsk, - h host.Host, ds lotus_dtypes.MetadataDS, - r repo.LockedRepo, - pieceStore dtypes.ProviderPieceStore, - indexer provider.Interface, - dataTransfer dtypes.ProviderDataTransfer, - spn storagemarket.StorageProviderNode, - df storageimpl.DealDeciderFunc, - dsw stores.DAGStoreWrapper, - meshCreator idxprov.MeshCreator, cfg config.DealmakingConfig, -) (storagemarket.StorageProvider, error) { - var opts []smnet.Option - - // Provide an empty deal protocol list to the libp2p host if legacy deals are disabled - // These protocols are handled by Boost provider and all legacy deals are rejected - if !cfg.EnableLegacyStorageDeals { - opts = append(opts, smnet.SupportedDealProtocols([]protocol.ID{})) - } - - net := smnet.NewFromLibp2pHost(h, opts...) - - dir := filepath.Join(r.Path(), lotus_modules.StagingAreaDirName) - err := os.MkdirAll(dir, os.ModePerm) - if err != nil { - return nil, fmt.Errorf("creating directory for staging legacy markets deals %s: %w", dir, err) - } - - store, err := piecefilestore.NewLocalFileStore(piecefilestore.OsPath(dir)) - if err != nil { - return nil, err - } - - opt := storageimpl.CustomDealDecisionLogic(df) - - return storageimpl.NewProvider( - net, - namespace.Wrap(ds, datastore.NewKey("/deals/provider")), - store, - dsw, - indexer, - pieceStore, - dataTransfer, - spn, - address.Address(minerAddress), - storedAsk, - meshCreator, - opt, - ) -} - -func DealDeciderFn(df lotus_dtypes.StorageDealFilter) storageimpl.DealDeciderFunc { - return func(ctx context.Context, deal storagemarket.MinerDeal) (bool, string, error) { - return df(ctx, toLotusGFMMinerDeal(deal)) - } -} - -func toLotusGFMMinerDeal(deal storagemarket.MinerDeal) lotus_gfm_storagemarket.MinerDeal { - lotusGFMDeal := lotus_gfm_storagemarket.MinerDeal{ - ClientDealProposal: deal.ClientDealProposal, - ProposalCid: deal.ProposalCid, - AddFundsCid: deal.AddFundsCid, - PublishCid: deal.PublishCid, - Miner: deal.Miner, - Client: deal.Client, - State: deal.State, - PiecePath: lotus_gfm_filestore.Path(deal.PiecePath), - MetadataPath: lotus_gfm_filestore.Path(deal.MetadataPath), - SlashEpoch: deal.SlashEpoch, - FastRetrieval: deal.FastRetrieval, - Message: deal.Message, - FundsReserved: deal.FundsReserved, - AvailableForRetrieval: deal.AvailableForRetrieval, - DealID: deal.DealID, - CreationTime: deal.CreationTime, - SectorNumber: deal.SectorNumber, - InboundCAR: deal.InboundCAR, - } - if deal.Ref != nil { - lotusGFMDeal.Ref = &lotus_gfm_storagemarket.DataRef{ - TransferType: deal.Ref.TransferType, - Root: deal.Ref.Root, - PieceCid: deal.Ref.PieceCid, - PieceSize: deal.Ref.PieceSize, - RawBlockSize: deal.Ref.RawBlockSize, - } - } - if deal.TransferChannelId != nil { - lotusGFMDeal.TransferChannelId = &datatransferv2.ChannelID{ - Initiator: deal.TransferChannelId.Initiator, - Responder: deal.TransferChannelId.Responder, - ID: datatransferv2.TransferID(deal.TransferChannelId.ID), - } - } - return lotusGFMDeal -} diff --git a/node/modules/piecedirectory.go b/node/modules/piecedirectory.go index 53910af1d..6274293f8 100644 --- a/node/modules/piecedirectory.go +++ b/node/modules/piecedirectory.go @@ -5,21 +5,14 @@ import ( "fmt" "time" - "github.com/filecoin-project/boost-gfm/piecestore" - "github.com/filecoin-project/boost-gfm/shared" - "github.com/filecoin-project/boost-gfm/storagemarket" - "github.com/filecoin-project/boost-gfm/stores" "github.com/filecoin-project/boost/cmd/lib" "github.com/filecoin-project/boost/gql" 
"github.com/filecoin-project/boost/node/config" "github.com/filecoin-project/boost/piecedirectory" "github.com/filecoin-project/boost/sectorstatemgr" bdclient "github.com/filecoin-project/boostd-data/client" - "github.com/filecoin-project/boostd-data/model" "github.com/filecoin-project/boostd-data/svc" "github.com/filecoin-project/boostd-data/yugabyte" - "github.com/filecoin-project/dagstore" - "github.com/filecoin-project/dagstore/shard" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-jsonrpc" "github.com/filecoin-project/lotus/api" @@ -27,10 +20,8 @@ import ( "github.com/filecoin-project/lotus/node/modules/dtypes" lotus_dtypes "github.com/filecoin-project/lotus/node/modules/dtypes" lotus_repo "github.com/filecoin-project/lotus/node/repo" - bstore "github.com/ipfs/boxo/blockstore" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" - carindex "github.com/ipld/go-car/v2/index" "go.uber.org/fx" ) @@ -86,7 +77,7 @@ func NewPieceDirectoryStore(cfg *config.Boost) func(lc fx.Lifecycle, r lotus_rep default: return fmt.Errorf("starting local index directory client: " + "neither yugabyte nor leveldb is enabled in config - " + - "you must explicitly configure either LocalIndexDirectory.Yugabyte "+ + "you must explicitly configure either LocalIndexDirectory.Yugabyte " + "or LocalIndexDirectory.Leveldb as the local index directory implementation") } @@ -157,10 +148,6 @@ func NewPieceDirectory(cfg *config.Boost) func(lc fx.Lifecycle, maddr dtypes.Min } } -func NewPieceStore(pm *piecedirectory.PieceDirectory, maddr address.Address) piecestore.PieceStore { - return &boostPieceStoreWrapper{piecedirectory: pm, maddr: maddr} -} - func NewPieceDoctor(lc fx.Lifecycle, maddr lotus_dtypes.MinerAddress, store *bdclient.Store, ssm *sectorstatemgr.SectorStateMgr, fullnodeApi api.FullNode) *piecedirectory.Doctor { doc := piecedirectory.NewDoctor(address.Address(maddr), store, ssm, fullnodeApi) docctx, cancel := context.WithCancel(context.Background()) @@ -177,143 +164,6 @@ func NewPieceDoctor(lc fx.Lifecycle, maddr lotus_dtypes.MinerAddress, store *bdc return doc } -type boostPieceStoreWrapper struct { - piecedirectory *piecedirectory.PieceDirectory - maddr address.Address -} - -func (pw *boostPieceStoreWrapper) Start(ctx context.Context) error { - return nil -} - -func (pw *boostPieceStoreWrapper) OnReady(ready shared.ReadyFunc) { - go ready(nil) -} - -func (pw *boostPieceStoreWrapper) AddDealForPiece(pieceCID cid.Cid, proposalCid cid.Cid, dealInfo piecestore.DealInfo) error { - di := model.DealInfo{ - DealUuid: proposalCid.String(), - IsLegacy: true, - ChainDealID: dealInfo.DealID, - MinerAddr: pw.maddr, - SectorID: dealInfo.SectorID, - PieceOffset: dealInfo.Offset, - PieceLength: dealInfo.Length, - // TODO: It would be nice if there's some way to figure out the CAR - // file size here (but I don't think there is an easy way in legacy - // markets without having access to the piece data itself) - CarLength: 0, - } - return pw.piecedirectory.AddDealForPiece(context.Background(), pieceCID, di) -} - -func (pw *boostPieceStoreWrapper) AddPieceBlockLocations(pieceCID cid.Cid, blockLocations map[cid.Cid]piecestore.BlockLocation) error { - // This method is no longer needed, we keep the CAR file index in the piece metadata store - return nil -} - -func (pw *boostPieceStoreWrapper) GetPieceInfo(pieceCID cid.Cid) (piecestore.PieceInfo, error) { - pieceDeals, err := pw.piecedirectory.GetPieceDeals(context.TODO(), pieceCID) - if err != nil { - return piecestore.PieceInfo{}, 
fmt.Errorf("getting piece deals from piece metadata store: %w", err) - } - - dis := make([]piecestore.DealInfo, 0, len(pieceDeals)) - for _, pd := range pieceDeals { - dis = append(dis, piecestore.DealInfo{ - DealID: pd.ChainDealID, - SectorID: pd.SectorID, - Offset: pd.PieceOffset, - Length: pd.PieceLength, - }) - } - return piecestore.PieceInfo{ - PieceCID: pieceCID, - Deals: dis, - }, nil -} - -func (pw *boostPieceStoreWrapper) GetCIDInfo(payloadCID cid.Cid) (piecestore.CIDInfo, error) { - // This is no longer used (CLI calls piece metadata store instead) - return piecestore.CIDInfo{}, nil -} - -func (pw *boostPieceStoreWrapper) ListCidInfoKeys() ([]cid.Cid, error) { - // This is no longer used (CLI calls piece metadata store instead) - return nil, nil -} - -func (pw *boostPieceStoreWrapper) ListPieceInfoKeys() ([]cid.Cid, error) { - // This is no longer used (CLI calls piece metadata store instead) - return nil, nil -} - -func NewDAGStoreWrapper(pm *piecedirectory.PieceDirectory) stores.DAGStoreWrapper { - // TODO: lotus_modules.NewStorageMarketProvider and lotus_modules.RetrievalProvider - // take a concrete *dagstore.Wrapper as a parameter. Create boost versions of these - // that instead take a stores.DAGStoreWrapper parameter - return &boostDAGStoreWrapper{piecedirectory: pm} -} - -type boostDAGStoreWrapper struct { - piecedirectory *piecedirectory.PieceDirectory -} - -func (dw *boostDAGStoreWrapper) DestroyShard(ctx context.Context, pieceCid cid.Cid, resch chan dagstore.ShardResult) error { - // This is no longer used (CLI calls piece metadata store instead) - return nil -} - -// Legacy markets calls piecestore.AddDealForPiece before RegisterShard, -// so we do the real work in AddDealForPiece. -func (dw *boostDAGStoreWrapper) RegisterShard(ctx context.Context, pieceCid cid.Cid, carPath string, eagerInit bool, resch chan dagstore.ShardResult) error { - res := dagstore.ShardResult{ - Key: shard.KeyFromCID(pieceCid), - Error: nil, - Accessor: nil, - } - - select { - case resch <- res: - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -func (dw *boostDAGStoreWrapper) LoadShard(ctx context.Context, pieceCid cid.Cid) (stores.ClosableBlockstore, error) { - bs, err := dw.piecedirectory.GetBlockstore(ctx, pieceCid) - if err != nil { - return nil, fmt.Errorf("getting blockstore in LoadShard: %w", err) - } - return closableBlockstore{Blockstore: bs}, nil -} - -func (dw *boostDAGStoreWrapper) MigrateDeals(ctx context.Context, deals []storagemarket.MinerDeal) (bool, error) { - // MigrateDeals is no longer needed - it's handled by the piece metadata store - return false, nil -} - -func (dw *boostDAGStoreWrapper) GetPiecesContainingBlock(blockCID cid.Cid) ([]cid.Cid, error) { - return dw.piecedirectory.PiecesContainingMultihash(context.TODO(), blockCID.Hash()) -} - -func (dw *boostDAGStoreWrapper) GetIterableIndexForPiece(pieceCid cid.Cid) (carindex.IterableIndex, error) { - return dw.piecedirectory.GetIterableIndex(context.TODO(), pieceCid) -} - -func (dw *boostDAGStoreWrapper) Close() error { - return nil -} - -type closableBlockstore struct { - bstore.Blockstore -} - -func (c closableBlockstore) Close() error { - return nil -} - func NewBlockGetter(pd *piecedirectory.PieceDirectory) gql.BlockGetter { return &pdBlockGetter{pd: pd} } diff --git a/node/modules/provider_data_transfer.go b/node/modules/provider_data_transfer.go deleted file mode 100644 index a27b3f657..000000000 --- a/node/modules/provider_data_transfer.go +++ /dev/null @@ -1,49 +0,0 @@ -package modules - -import ( - 
"context" - "errors" - "time" - - marketevents "github.com/filecoin-project/boost/markets/loggers" - "github.com/filecoin-project/boost/node/modules/dtypes" - dtimpl "github.com/filecoin-project/go-data-transfer/impl" - lotus_dtypes "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/node/repo" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" - "go.uber.org/fx" -) - -// NewProviderDataTransfer returns a data transfer manager -func NewProviderDataTransfer(lc fx.Lifecycle, net dtypes.ProviderTransferNetwork, transport dtypes.ProviderTransport, ds lotus_dtypes.MetadataDS, r repo.LockedRepo) (dtypes.ProviderDataTransfer, error) { - dtDs := namespace.Wrap(ds, datastore.NewKey("/datatransfer/provider/transfers")) - - dt, err := dtimpl.NewDataTransfer(dtDs, net, transport) - if err != nil { - return nil, err - } - - dt.OnReady(marketevents.ReadyLogger("provider data transfer")) - lc.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { - dt.SubscribeToEvents(marketevents.DataTransferLogger) - return dt.Start(ctx) - }, - OnStop: func(ctx context.Context) error { - errc := make(chan error) - - go func() { - errc <- dt.Stop(ctx) - }() - - select { - case err := <-errc: - return err - case <-time.After(5 * time.Second): - return errors.New("couldnt stop datatransfer.Manager in 5 seconds. forcing an App.Stop") - } - }, - }) - return dt, nil -} diff --git a/node/modules/provider_piece_store.go b/node/modules/provider_piece_store.go deleted file mode 100644 index d8fa71c74..000000000 --- a/node/modules/provider_piece_store.go +++ /dev/null @@ -1,25 +0,0 @@ -package modules - -import ( - "context" - "github.com/filecoin-project/boost/node/modules/dtypes" - - marketevents "github.com/filecoin-project/boost/markets/loggers" - "github.com/filecoin-project/boost/piecedirectory" - "github.com/filecoin-project/go-address" - lotus_dtypes "github.com/filecoin-project/lotus/node/modules/dtypes" - "go.uber.org/fx" -) - -// NewProviderPieceStore creates a statestore for storing metadata about pieces -// shared by the storage and retrieval providers -func NewProviderPieceStore(lc fx.Lifecycle, pm *piecedirectory.PieceDirectory, maddr lotus_dtypes.MinerAddress) (dtypes.ProviderPieceStore, error) { - ps := NewPieceStore(pm, address.Address(maddr)) - ps.OnReady(marketevents.ReadyLogger("piecestore")) - lc.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { - return ps.Start(ctx) - }, - }) - return ps, nil -} diff --git a/node/modules/retrieval.go b/node/modules/retrieval.go index 89a7f681e..d6d7773f8 100644 --- a/node/modules/retrieval.go +++ b/node/modules/retrieval.go @@ -7,11 +7,9 @@ import ( "path" "time" - lotus_retrievalmarket "github.com/filecoin-project/boost-gfm/retrievalmarket" "github.com/filecoin-project/boost/cmd/booster-bitswap/bitswap" "github.com/filecoin-project/boost/db" "github.com/filecoin-project/boost/node/config" - "github.com/filecoin-project/boost/node/modules/dtypes" "github.com/filecoin-project/boost/protocolproxy" "github.com/filecoin-project/boost/retrievalmarket/lp2pimpl" "github.com/filecoin-project/boost/retrievalmarket/rtvllog" @@ -156,19 +154,15 @@ func NewRetrievalLogDB(db *RetrievalSqlDB) *rtvllog.RetrievalLogDB { } // Write graphsync retrieval updates to the database -func HandleRetrievalGraphsyncUpdates(duration time.Duration, stalledDuration time.Duration) func(lc fx.Lifecycle, db *rtvllog.RetrievalLogDB, m lotus_retrievalmarket.RetrievalProvider, dt dtypes.ProviderDataTransfer, gsur 
*server.GraphsyncUnpaidRetrieval) { - return func(lc fx.Lifecycle, db *rtvllog.RetrievalLogDB, m lotus_retrievalmarket.RetrievalProvider, dt dtypes.ProviderDataTransfer, gsur *server.GraphsyncUnpaidRetrieval) { - rel := rtvllog.NewRetrievalLog(db, duration, dt, stalledDuration, gsur) +func HandleRetrievalGraphsyncUpdates(duration time.Duration, stalledDuration time.Duration) func(lc fx.Lifecycle, db *rtvllog.RetrievalLogDB, gsur *server.GraphsyncUnpaidRetrieval) { + return func(lc fx.Lifecycle, db *rtvllog.RetrievalLogDB, gsur *server.GraphsyncUnpaidRetrieval) { + rel := rtvllog.NewRetrievalLog(db, duration, stalledDuration, gsur) relctx, cancel := context.WithCancel(context.Background()) type unsubFn func() var unsubs []unsubFn lc.Append(fx.Hook{ OnStart: func(ctx context.Context) error { - unsubs = append(unsubs, unsubFn(m.SubscribeToEvents(rel.OnRetrievalEvent))) - unsubs = append(unsubs, unsubFn(m.SubscribeToQueryEvents(rel.OnQueryEvent))) - unsubs = append(unsubs, unsubFn(m.SubscribeToValidationEvents(rel.OnValidationEvent))) - unsubs = append(unsubs, unsubFn(dt.SubscribeToEvents(rel.OnDataTransferEvent))) unsubs = append(unsubs, unsubFn(gsur.SubscribeToDataTransferEvents(rel.OnDataTransferEvent))) unsubs = append(unsubs, unsubFn(gsur.SubscribeToMarketsEvents(rel.OnRetrievalEvent))) rel.Start(relctx) diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index efa1539d2..79270feae 100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -9,31 +9,21 @@ import ( "path" "time" - "github.com/filecoin-project/boost-gfm/retrievalmarket" - retrievalimpl "github.com/filecoin-project/boost-gfm/retrievalmarket/impl" - rmnet "github.com/filecoin-project/boost-gfm/retrievalmarket/network" - "github.com/filecoin-project/boost-gfm/shared" - gfm_storagemarket "github.com/filecoin-project/boost-gfm/storagemarket" - storageimpl "github.com/filecoin-project/boost-gfm/storagemarket/impl" - "github.com/filecoin-project/boost-gfm/storagemarket/impl/storedask" - "github.com/filecoin-project/boost-gfm/stores" "github.com/filecoin-project/boost/cmd/lib" + dtnet "github.com/filecoin-project/boost/datatransfer/network" + dtgstransport "github.com/filecoin-project/boost/datatransfer/transport/graphsync" "github.com/filecoin-project/boost/db" "github.com/filecoin-project/boost/fundmanager" "github.com/filecoin-project/boost/gql" "github.com/filecoin-project/boost/indexprovider" "github.com/filecoin-project/boost/lib/legacy" "github.com/filecoin-project/boost/lib/mpoolmonitor" - "github.com/filecoin-project/boost/markets/idxprov" - marketevents "github.com/filecoin-project/boost/markets/loggers" - "github.com/filecoin-project/boost/markets/pricing" "github.com/filecoin-project/boost/markets/sectoraccessor" "github.com/filecoin-project/boost/markets/storageadapter" "github.com/filecoin-project/boost/node/config" "github.com/filecoin-project/boost/node/impl/backupmgr" "github.com/filecoin-project/boost/node/modules/dtypes" "github.com/filecoin-project/boost/piecedirectory" - brm "github.com/filecoin-project/boost/retrievalmarket/lib" "github.com/filecoin-project/boost/retrievalmarket/rtvllog" "github.com/filecoin-project/boost/retrievalmarket/server" "github.com/filecoin-project/boost/sectorstatemgr" @@ -42,30 +32,24 @@ import ( "github.com/filecoin-project/boost/storagemarket/logs" "github.com/filecoin-project/boost/storagemarket/lp2pimpl" "github.com/filecoin-project/boost/storagemarket/sealingpipeline" + "github.com/filecoin-project/boost/storagemarket/storedask" 
"github.com/filecoin-project/boost/storagemarket/types" + "github.com/filecoin-project/boost/storagemarket/types/legacytypes" "github.com/filecoin-project/boost/transport/httptransport" "github.com/filecoin-project/boostd-data/shared/tracing" - "github.com/filecoin-project/dagstore" - "github.com/filecoin-project/dagstore/indexbs" - "github.com/filecoin-project/dagstore/shard" "github.com/filecoin-project/go-address" - dtnet "github.com/filecoin-project/go-data-transfer/network" - dtgstransport "github.com/filecoin-project/go-data-transfer/transport/graphsync" - lotus_gfm_shared "github.com/filecoin-project/go-fil-markets/shared" - lotus_gfm_storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" + vfsm "github.com/filecoin-project/go-ds-versioning/pkg/fsm" "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v8/verifreg" "github.com/filecoin-project/go-state-types/builtin/v9/account" - "github.com/filecoin-project/go-state-types/builtin/v9/market" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-statemachine/fsm" "github.com/filecoin-project/lotus/api/v1api" "github.com/filecoin-project/lotus/build" ctypes "github.com/filecoin-project/lotus/chain/types" ltypes "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/gateway" + "github.com/filecoin-project/lotus/lib/backupds" "github.com/filecoin-project/lotus/lib/sigs" lotus_dtypes "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/helpers" @@ -75,7 +59,6 @@ import ( "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/namespace" - "github.com/ipni/go-libipni/metadata" provider "github.com/ipni/index-provider" "github.com/libp2p/go-libp2p/core/host" "go.uber.org/fx" @@ -280,14 +263,6 @@ func mutateCfg(r lotus_repo.LockedRepo, mutator func(*config.Boost)) error { return multierr.Combine(typeErr, setConfigErr) } -func StorageNetworkName(ctx helpers.MetricsCtx, a v1api.FullNode) (dtypes.NetworkName, error) { - n, err := a.StateNetworkName(ctx) - if err != nil { - return "", err - } - return dtypes.NetworkName(n), nil -} - func NewBoostDB(r lotus_repo.LockedRepo) (*sql.DB, error) { // fixes error "database is locked", caused by concurrent access from deal goroutines to a single sqlite3 db connection // see: https://github.com/mattn/go-sqlite3#:~:text=Error%3A%20database%20is%20locked @@ -326,21 +301,11 @@ func NewFundsDB(sqldb *sql.DB) *db.FundsDB { return db.NewFundsDB(sqldb) } -func HandleRetrieval(host host.Host, lc fx.Lifecycle, m retrievalmarket.RetrievalProvider) { - m.OnReady(marketevents.ReadyLogger("retrieval provider")) - lc.Append(fx.Hook{ - - OnStart: func(ctx context.Context) error { - m.SubscribeToEvents(marketevents.RetrievalProviderLogger) - return m.Start(ctx) - }, - OnStop: func(context.Context) error { - return m.Stop() - }, - }) +func NewAskDB(sqldb *sql.DB) *db.StorageAskDB { + return db.NewStorageAskDB(sqldb) } -func HandleQueryAsk(lc fx.Lifecycle, h host.Host, maddr lotus_dtypes.MinerAddress, pd *piecedirectory.PieceDirectory, sa *lib.MultiMinerAccessor, askStore server.AskGetter, full v1api.FullNode) { +func HandleQueryAsk(lc fx.Lifecycle, h host.Host, maddr lotus_dtypes.MinerAddress, pd *piecedirectory.PieceDirectory, sa *lib.MultiMinerAccessor, 
askStore RetrievalAskGetter, full v1api.FullNode) { handler := server.NewQueryAskHandler(h, address.Address(maddr), pd, sa, askStore, full) lc.Append(fx.Hook{ OnStart: func(ctx context.Context) error { @@ -358,47 +323,16 @@ func NewSectorStateDB(sqldb *sql.DB) *db.SectorStateDB { return db.NewSectorStateDB(sqldb) } -func HandleLegacyDeals(mctx helpers.MetricsCtx, lc fx.Lifecycle, lsp gfm_storagemarket.StorageProvider) error { - log.Info("starting legacy storage provider") - ctx := helpers.LifecycleCtx(mctx, lc) - lsp.OnReady(marketevents.ReadyLogger("storage provider")) - lc.Append(fx.Hook{ - OnStart: func(context.Context) error { - lsp.SubscribeToEvents(marketevents.StorageProviderLogger) - return lsp.Start(ctx) - }, - OnStop: func(context.Context) error { - return lsp.Stop() - }, - }) - return nil -} - -func HandleBoostLibp2pDeals(cfg *config.Boost) func(lc fx.Lifecycle, h host.Host, prov *storagemarket.Provider, a v1api.FullNode, legacySP gfm_storagemarket.StorageProvider, idxProv *indexprovider.Wrapper, plDB *db.ProposalLogsDB, spApi sealingpipeline.API) { - return func(lc fx.Lifecycle, h host.Host, prov *storagemarket.Provider, a v1api.FullNode, legacySP gfm_storagemarket.StorageProvider, idxProv *indexprovider.Wrapper, plDB *db.ProposalLogsDB, spApi sealingpipeline.API) { +func HandleBoostLibp2pDeals(cfg *config.Boost) func(lc fx.Lifecycle, h host.Host, prov *storagemarket.Provider, a v1api.FullNode, idxProv *indexprovider.Wrapper, plDB *db.ProposalLogsDB, spApi sealingpipeline.API) { + return func(lc fx.Lifecycle, h host.Host, prov *storagemarket.Provider, a v1api.FullNode, idxProv *indexprovider.Wrapper, plDB *db.ProposalLogsDB, spApi sealingpipeline.API) { lp2pnet := lp2pimpl.NewDealProvider(h, prov, a, plDB, spApi, cfg.Dealmaking.EnableLegacyStorageDeals) lc.Append(fx.Hook{ OnStart: func(ctx context.Context) error { - // Wait for the legacy SP to fire the "ready" event before starting - // the boost SP. - // Boost overrides some listeners so it must start after the legacy SP. 
- errch := make(chan error, 1) - log.Info("waiting for legacy storage provider 'ready' event") - legacySP.OnReady(func(err error) { - errch <- err - }) - err := <-errch - if err != nil { - log.Errorf("failed to start legacy storage provider: %w", err) - return err - } - log.Info("legacy storage provider started successfully") - // Start the Boost SP log.Info("starting boost storage provider") - err = prov.Start() + err := prov.Start() if err != nil { return fmt.Errorf("starting storage provider: %w", err) } @@ -519,9 +453,9 @@ func NewChainDealManager(a v1api.FullNode) *storagemarket.ChainDealManager { return storagemarket.NewChainDealManager(a, cdmCfg) } -func NewLegacyDealsManager(lc fx.Lifecycle, legacyProv gfm_storagemarket.StorageProvider) *legacy.LegacyDealsManager { +func NewLegacyDealsManager(lc fx.Lifecycle, legacyFSM fsm.Group) legacy.LegacyDealManager { ctx, cancel := context.WithCancel(context.Background()) - mgr := legacy.NewLegacyDealsManager(legacyProv) + mgr := legacy.NewLegacyDealsManager(legacyFSM) lc.Append(fx.Hook{ OnStart: func(_ context.Context) error { go mgr.Run(ctx) @@ -535,78 +469,12 @@ func NewLegacyDealsManager(lc fx.Lifecycle, legacyProv gfm_storagemarket.Storage return mgr } -func NewStorageAsk(ctx helpers.MetricsCtx, fapi v1api.FullNode, ds lotus_dtypes.MetadataDS, minerAddress lotus_dtypes.MinerAddress, spn gfm_storagemarket.StorageProviderNode) (*storedask.StoredAsk, error) { - mi, err := fapi.StateMinerInfo(ctx, address.Address(minerAddress), ltypes.EmptyTSK) - if err != nil { - return nil, err - } - - providerDs := namespace.Wrap(ds, datastore.NewKey("/deals/provider")) - // legacy this was mistake where this key was place -- so we move the legacy key if need be - err = shared.MoveKey(providerDs, "/latest-ask", "/storage-ask/latest") - if err != nil { - return nil, err - } - return storedask.NewStoredAsk(namespace.Wrap(providerDs, datastore.NewKey("/storage-ask")), datastore.NewKey("latest"), spn, address.Address(minerAddress), - gfm_storagemarket.MaxPieceSize(abi.PaddedPieceSize(mi.SectorSize))) -} - -// NewLegacyStorageProvider wraps lotus's storage provider function but additionally sets up the metadata announcement -// for legacy deals based off of Boost's configured protocols -func NewLegacyStorageProvider(cfg *config.Boost) func(minerAddress lotus_dtypes.MinerAddress, - storedAsk *storedask.StoredAsk, - h host.Host, ds lotus_dtypes.MetadataDS, - r repo.LockedRepo, - pieceStore dtypes.ProviderPieceStore, - indexer provider.Interface, - dataTransfer dtypes.ProviderDataTransfer, - spn gfm_storagemarket.StorageProviderNode, - df storageimpl.DealDeciderFunc, - dsw stores.DAGStoreWrapper, - meshCreator idxprov.MeshCreator, -) (gfm_storagemarket.StorageProvider, error) { - return func(minerAddress lotus_dtypes.MinerAddress, - storedAsk *storedask.StoredAsk, - h host.Host, ds lotus_dtypes.MetadataDS, - r repo.LockedRepo, - pieceStore dtypes.ProviderPieceStore, - indexer provider.Interface, - dataTransfer dtypes.ProviderDataTransfer, - spn gfm_storagemarket.StorageProviderNode, - df storageimpl.DealDeciderFunc, - dsw stores.DAGStoreWrapper, - meshCreator idxprov.MeshCreator, - ) (gfm_storagemarket.StorageProvider, error) { - prov, err := StorageProvider(minerAddress, storedAsk, h, ds, r, pieceStore, indexer, dataTransfer, spn, df, dsw, meshCreator, cfg.Dealmaking) - if err != nil { - return prov, err - } - p := prov.(*storageimpl.Provider) - p.Configure(storageimpl.CustomMetadataGenerator(func(deal gfm_storagemarket.MinerDeal) metadata.Metadata { - - // 
Announce deal to network Indexer - protocols := []metadata.Protocol{ - &metadata.GraphsyncFilecoinV1{ - PieceCID: deal.Proposal.PieceCID, - FastRetrieval: deal.FastRetrieval, - VerifiedDeal: deal.Proposal.VerifiedDeal, - }, - } - - return metadata.Default.New(protocols...) - - })) - return p, nil - } -} - -func NewStorageMarketProvider(provAddr address.Address, cfg *config.Boost) func(lc fx.Lifecycle, h host.Host, a v1api.FullNode, sqldb *sql.DB, dealsDB *db.DealsDB, fundMgr *fundmanager.FundManager, storageMgr *storagemanager.StorageManager, dp *storageadapter.DealPublisher, secb *sectorblocks.SectorBlocks, commpc types.CommpCalculator, sps sealingpipeline.API, df dtypes.StorageDealFilter, logsSqlDB *LogSqlDB, logsDB *db.LogsDB, piecedirectory *piecedirectory.PieceDirectory, ip *indexprovider.Wrapper, lp gfm_storagemarket.StorageProvider, cdm *storagemarket.ChainDealManager) (*storagemarket.Provider, error) { +func NewStorageMarketProvider(provAddr address.Address, cfg *config.Boost) func(lc fx.Lifecycle, h host.Host, a v1api.FullNode, sqldb *sql.DB, dealsDB *db.DealsDB, fundMgr *fundmanager.FundManager, storageMgr *storagemanager.StorageManager, sask *storedask.StoredAsk, dp *storageadapter.DealPublisher, secb *sectorblocks.SectorBlocks, commpc types.CommpCalculator, sps sealingpipeline.API, df dtypes.StorageDealFilter, logsSqlDB *LogSqlDB, logsDB *db.LogsDB, piecedirectory *piecedirectory.PieceDirectory, ip *indexprovider.Wrapper, cdm *storagemarket.ChainDealManager) (*storagemarket.Provider, error) { return func(lc fx.Lifecycle, h host.Host, a v1api.FullNode, sqldb *sql.DB, dealsDB *db.DealsDB, - fundMgr *fundmanager.FundManager, storageMgr *storagemanager.StorageManager, dp *storageadapter.DealPublisher, secb *sectorblocks.SectorBlocks, + fundMgr *fundmanager.FundManager, storageMgr *storagemanager.StorageManager, sask *storedask.StoredAsk, dp *storageadapter.DealPublisher, secb *sectorblocks.SectorBlocks, commpc types.CommpCalculator, sps sealingpipeline.API, df dtypes.StorageDealFilter, logsSqlDB *LogSqlDB, logsDB *db.LogsDB, - piecedirectory *piecedirectory.PieceDirectory, ip *indexprovider.Wrapper, - lp gfm_storagemarket.StorageProvider, cdm *storagemarket.ChainDealManager) (*storagemarket.Provider, error) { + piecedirectory *piecedirectory.PieceDirectory, ip *indexprovider.Wrapper, cdm *storagemarket.ChainDealManager) (*storagemarket.Provider, error) { prvCfg := storagemarket.Config{ MaxTransferDuration: time.Duration(cfg.Dealmaking.MaxTransferDuration), @@ -624,7 +492,7 @@ func NewStorageMarketProvider(provAddr address.Address, cfg *config.Boost) func( dl := logs.NewDealLogger(logsDB) tspt := httptransport.New(h, dl, httptransport.NChunksOpt(cfg.HttpDownload.NChunks), httptransport.AllowPrivateIPsOpt(cfg.HttpDownload.AllowPrivateIPs)) prov, err := storagemarket.NewProvider(prvCfg, sqldb, dealsDB, fundMgr, storageMgr, a, dp, provAddr, secb, commpc, - sps, cdm, df, logsSqlDB.db, logsDB, piecedirectory, ip, lp, &signatureVerifier{a}, dl, tspt) + sps, cdm, df, logsSqlDB.db, logsDB, piecedirectory, ip, sask, &signatureVerifier{a}, dl, tspt) if err != nil { return nil, err } @@ -633,16 +501,15 @@ func NewStorageMarketProvider(provAddr address.Address, cfg *config.Boost) func( } } -func NewGraphqlServer(cfg *config.Boost) func(lc fx.Lifecycle, r repo.LockedRepo, h host.Host, prov *storagemarket.Provider, dealsDB *db.DealsDB, logsDB *db.LogsDB, retDB *rtvllog.RetrievalLogDB, plDB *db.ProposalLogsDB, fundsDB *db.FundsDB, fundMgr *fundmanager.FundManager, storageMgr 
*storagemanager.StorageManager, publisher *storageadapter.DealPublisher, spApi sealingpipeline.API, legacyDeals *legacy.LegacyDealsManager, legacyProv gfm_storagemarket.StorageProvider, legacyDT dtypes.ProviderDataTransfer, ps dtypes.ProviderPieceStore, piecedirectory *piecedirectory.PieceDirectory, indexProv provider.Interface, idxProvWrapper *indexprovider.Wrapper, fullNode v1api.FullNode, bg gql.BlockGetter, ssm *sectorstatemgr.SectorStateMgr, mpool *mpoolmonitor.MpoolMonitor, mma *lib.MultiMinerAccessor) *gql.Server { +func NewGraphqlServer(cfg *config.Boost) func(lc fx.Lifecycle, r repo.LockedRepo, h host.Host, prov *storagemarket.Provider, dealsDB *db.DealsDB, logsDB *db.LogsDB, retDB *rtvllog.RetrievalLogDB, plDB *db.ProposalLogsDB, fundsDB *db.FundsDB, fundMgr *fundmanager.FundManager, storageMgr *storagemanager.StorageManager, publisher *storageadapter.DealPublisher, spApi sealingpipeline.API, legacyDeals legacy.LegacyDealManager, piecedirectory *piecedirectory.PieceDirectory, indexProv provider.Interface, idxProvWrapper *indexprovider.Wrapper, fullNode v1api.FullNode, bg gql.BlockGetter, ssm *sectorstatemgr.SectorStateMgr, mpool *mpoolmonitor.MpoolMonitor, mma *lib.MultiMinerAccessor, sask *storedask.StoredAsk) *gql.Server { return func(lc fx.Lifecycle, r repo.LockedRepo, h host.Host, prov *storagemarket.Provider, dealsDB *db.DealsDB, logsDB *db.LogsDB, retDB *rtvllog.RetrievalLogDB, plDB *db.ProposalLogsDB, fundsDB *db.FundsDB, fundMgr *fundmanager.FundManager, storageMgr *storagemanager.StorageManager, publisher *storageadapter.DealPublisher, spApi sealingpipeline.API, - legacyDeals *legacy.LegacyDealsManager, legacyProv gfm_storagemarket.StorageProvider, legacyDT dtypes.ProviderDataTransfer, - ps dtypes.ProviderPieceStore, piecedirectory *piecedirectory.PieceDirectory, + legacyDeals legacy.LegacyDealManager, piecedirectory *piecedirectory.PieceDirectory, indexProv provider.Interface, idxProvWrapper *indexprovider.Wrapper, fullNode v1api.FullNode, bg gql.BlockGetter, - ssm *sectorstatemgr.SectorStateMgr, mpool *mpoolmonitor.MpoolMonitor, mma *lib.MultiMinerAccessor) *gql.Server { + ssm *sectorstatemgr.SectorStateMgr, mpool *mpoolmonitor.MpoolMonitor, mma *lib.MultiMinerAccessor, sask *storedask.StoredAsk) *gql.Server { resolverCtx, cancel := context.WithCancel(context.Background()) - resolver := gql.NewResolver(resolverCtx, cfg, r, h, dealsDB, logsDB, retDB, plDB, fundsDB, fundMgr, storageMgr, spApi, prov, legacyDeals, legacyProv, legacyDT, ps, piecedirectory, publisher, indexProv, idxProvWrapper, fullNode, ssm, mpool, mma) + resolver := gql.NewResolver(resolverCtx, cfg, r, h, dealsDB, logsDB, retDB, plDB, fundsDB, fundMgr, storageMgr, spApi, prov, legacyDeals, piecedirectory, publisher, indexProv, idxProvWrapper, fullNode, ssm, mpool, mma, sask) svr := gql.NewServer(cfg, resolver, bg) lc.Append(fx.Hook{ @@ -665,74 +532,6 @@ func NewSectorAccessor(cfg *config.Boost) sectoraccessor.SectorAccessorConstruct return sectoraccessor.NewCachingSectorAccessor(maxCacheSize, time.Duration(cfg.Dealmaking.IsUnsealedCacheExpiry)) } -// ShardSelector helps to resolve a circular dependency: -// The IndexBackedBlockstore has a shard selector, which needs to query the -// RetrievalProviderNode's ask to find out if it's free to retrieve a -// particular piece. -// However the RetrievalProviderNode depends on the DAGStore which depends on -// IndexBackedBlockstore. 
-// So we -// - create a ShardSelector that has no dependencies with a default shard -// selection function that just selects no shards -// - later call SetShardSelectorFunc to create a real shard selector function -// with all its dependencies, and set it on the ShardSelector object. -type ShardSelector struct { - Proxy indexbs.ShardSelectorF - Target indexbs.ShardSelectorF -} - -func NewShardSelector() *ShardSelector { - ss := &ShardSelector{ - // The default target function always selects no shards - Target: func(c cid.Cid, shards []shard.Key) (shard.Key, error) { - return shard.Key{}, indexbs.ErrNoShardSelected - }, - } - ss.Proxy = func(c cid.Cid, shards []shard.Key) (shard.Key, error) { - return ss.Target(c, shards) - } - - return ss -} - -func SetShardSelectorFunc(lc fx.Lifecycle, shardSelector *ShardSelector, ps dtypes.ProviderPieceStore, sa retrievalmarket.SectorAccessor, rp retrievalmarket.RetrievalProvider) error { - ctx, cancel := context.WithCancel(context.Background()) - lc.Append(fx.Hook{ - OnStop: func(ctx context.Context) error { - cancel() - return nil - }, - }) - - ss, err := brm.NewShardSelector(ctx, ps, sa, rp) - if err != nil { - return fmt.Errorf("creating shard selector: %w", err) - } - - shardSelector.Target = ss.ShardSelectorF - - return nil -} - -func NewIndexBackedBlockstore(cfg *config.Boost) func(lc fx.Lifecycle, dagst dagstore.Interface, ss *ShardSelector) (dtypes.IndexBackedBlockstore, error) { - return func(lc fx.Lifecycle, dagst dagstore.Interface, ss *ShardSelector) (dtypes.IndexBackedBlockstore, error) { - ctx, cancel := context.WithCancel(context.Background()) - lc.Append(fx.Hook{ - OnStop: func(ctx context.Context) error { - cancel() - return nil - }, - }) - - ibsds := brm.NewIndexBackedBlockstoreDagstore(dagst) - rbs, err := indexbs.NewIndexBackedBlockstore(ctx, ibsds, ss.Proxy, cfg.Dealmaking.BlockstoreCacheMaxShards, time.Duration(cfg.Dealmaking.BlockstoreCacheExpiry)) - if err != nil { - return nil, fmt.Errorf("failed to create index backed blockstore: %w", err) - } - return dtypes.IndexBackedBlockstore(rbs), nil - } -} - func NewTracing(cfg *config.Boost) func(lc fx.Lifecycle) (*tracing.Tracing, error) { return func(lc fx.Lifecycle) (*tracing.Tracing, error) { if cfg.Tracing.Enabled { @@ -767,123 +566,6 @@ func NewProviderTransport(h host.Host, gs dtypes.StagingGraphsync) dtypes.Provid return dtgstransport.NewTransport(h.ID(), gs) } -func RetrievalNetwork(h host.Host) rmnet.RetrievalMarketNetwork { - return rmnet.NewFromLibp2pHost(h) -} - -// RetrievalPricingFunc configures the pricing function to use for retrieval deals. 
-func RetrievalPricingFunc(cfg config.DealmakingConfig) func(_ dtypes.ConsiderOnlineRetrievalDealsConfigFunc, - _ dtypes.ConsiderOfflineRetrievalDealsConfigFunc) dtypes.RetrievalPricingFunc { - - return func(_ dtypes.ConsiderOnlineRetrievalDealsConfigFunc, - _ dtypes.ConsiderOfflineRetrievalDealsConfigFunc) dtypes.RetrievalPricingFunc { - if cfg.RetrievalPricing.Strategy == config.RetrievalPricingExternalMode { - return pricing.ExternalRetrievalPricingFunc(cfg.RetrievalPricing.External.Path) - } - - return retrievalimpl.DefaultPricingFunc(cfg.RetrievalPricing.Default.VerifiedDealsFreeTransfer) - } -} - -// RetrievalProvider creates a new retrieval provider attached to the provider blockstore -func RetrievalProvider( - maddr lotus_dtypes.MinerAddress, - adapter retrievalmarket.RetrievalProviderNode, - sa retrievalmarket.SectorAccessor, - netwk rmnet.RetrievalMarketNetwork, - ds lotus_dtypes.MetadataDS, - pieceStore dtypes.ProviderPieceStore, - dt dtypes.ProviderDataTransfer, - pricingFnc dtypes.RetrievalPricingFunc, - userFilter dtypes.RetrievalDealFilter, - dagStore stores.DAGStoreWrapper, -) (retrievalmarket.RetrievalProvider, error) { - opt := retrievalimpl.DealDeciderOpt(retrievalimpl.DealDecider(userFilter)) - - retrievalmarket.DefaultPricePerByte = big.Zero() // todo: for whatever reason this is a global var in markets - - return retrievalimpl.NewProvider( - address.Address(maddr), - adapter, - sa, - netwk, - pieceStore, - dagStore, - dt, - namespace.Wrap(ds, datastore.NewKey("/retrievals/provider")), - retrievalimpl.RetrievalPricingFunc(pricingFnc), - opt, - ) -} - -func LotusGFMStorageProviderNode(spn gfm_storagemarket.StorageProviderNode) lotus_gfm_storagemarket.StorageProviderNode { - return &lotusGFMSPN{StorageProviderNode: spn} -} - -type lotusGFMSPN struct { - gfm_storagemarket.StorageProviderNode -} - -func (l *lotusGFMSPN) GetChainHead(ctx context.Context) (lotus_gfm_shared.TipSetToken, abi.ChainEpoch, error) { - tst, ce, err := l.StorageProviderNode.GetChainHead(ctx) - return lotus_gfm_shared.TipSetToken(tst), ce, err -} - -func (l *lotusGFMSPN) GetBalance(ctx context.Context, addr address.Address, tok lotus_gfm_shared.TipSetToken) (lotus_gfm_storagemarket.Balance, error) { - //TODO implement me - panic("implement me") -} - -func (l *lotusGFMSPN) VerifySignature(ctx context.Context, signature crypto.Signature, signer address.Address, plaintext []byte, tok lotus_gfm_shared.TipSetToken) (bool, error) { - //TODO implement me - panic("implement me") -} - -func (l *lotusGFMSPN) OnDealSectorPreCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, proposal market.DealProposal, publishCid *cid.Cid, cb lotus_gfm_storagemarket.DealSectorPreCommittedCallback) error { - //TODO implement me - panic("implement me") -} - -func (l *lotusGFMSPN) OnDealSectorCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, sectorNumber abi.SectorNumber, proposal market.DealProposal, publishCid *cid.Cid, cb lotus_gfm_storagemarket.DealSectorCommittedCallback) error { - //TODO implement me - panic("implement me") -} - -func (l *lotusGFMSPN) OnDealExpiredOrSlashed(ctx context.Context, dealID abi.DealID, onDealExpired lotus_gfm_storagemarket.DealExpiredCallback, onDealSlashed lotus_gfm_storagemarket.DealSlashedCallback) error { - //TODO implement me - panic("implement me") -} - -func (l *lotusGFMSPN) PublishDeals(ctx context.Context, deal lotus_gfm_storagemarket.MinerDeal) (cid.Cid, error) { - //TODO implement me - panic("implement me") -} - -func (l *lotusGFMSPN) 
WaitForPublishDeals(ctx context.Context, mcid cid.Cid, proposal market.DealProposal) (*lotus_gfm_storagemarket.PublishDealsWaitResult, error) { - //TODO implement me - panic("implement me") -} - -func (l *lotusGFMSPN) OnDealComplete(ctx context.Context, deal lotus_gfm_storagemarket.MinerDeal, pieceSize abi.UnpaddedPieceSize, pieceReader lotus_gfm_shared.ReadSeekStarter) (*lotus_gfm_storagemarket.PackingResult, error) { - //TODO implement me - panic("implement me") -} - -func (l *lotusGFMSPN) GetMinerWorkerAddress(ctx context.Context, addr address.Address, tok lotus_gfm_shared.TipSetToken) (address.Address, error) { - //TODO implement me - panic("implement me") -} - -func (l *lotusGFMSPN) GetDataCap(ctx context.Context, addr address.Address, tok lotus_gfm_shared.TipSetToken) (*verifreg.DataCap, error) { - //TODO implement me - panic("implement me") -} - -func (l *lotusGFMSPN) GetProofType(ctx context.Context, addr address.Address, tok lotus_gfm_shared.TipSetToken) (abi.RegisteredSealProof, error) { - //TODO implement me - panic("implement me") -} - func NewMpoolMonitor(cfg *config.Boost) func(lc fx.Lifecycle, a v1api.FullNode) *mpoolmonitor.MpoolMonitor { return func(lc fx.Lifecycle, a v1api.FullNode) *mpoolmonitor.MpoolMonitor { mpm := mpoolmonitor.NewMonitor(a, cfg.Monitoring.MpoolAlertEpochs) @@ -896,3 +578,25 @@ func NewMpoolMonitor(cfg *config.Boost) func(lc fx.Lifecycle, a v1api.FullNode) return mpm } } + +func NewLegacyDealsFSM(cfg *config.Boost) func(lc fx.Lifecycle, ds *backupds.Datastore) (fsm.Group, error) { + return func(lc fx.Lifecycle, ds *backupds.Datastore) (fsm.Group, error) { + // Get the deals FSM + provDS := namespace.Wrap(ds, datastore.NewKey("/deals/provider")) + deals, migrate, err := vfsm.NewVersionedFSM(provDS, fsm.Parameters{ + StateType: legacytypes.MinerDeal{}, + StateKeyField: "State", + }, nil, "2") + if err != nil { + return nil, fmt.Errorf("reading legacy deals from datastore: %w", err) + } + ctx := context.Background() + + err = migrate(ctx) + if err != nil { + return nil, fmt.Errorf("running provider fsm migration script: %w", err) + } + + return deals, err + } +} diff --git a/node/modules/storageminer_dagstore.go b/node/modules/storageminer_dagstore.go deleted file mode 100644 index 27575467d..000000000 --- a/node/modules/storageminer_dagstore.go +++ /dev/null @@ -1,138 +0,0 @@ -package modules - -import ( - "context" - "github.com/filecoin-project/boost-gfm/piecestore" - "github.com/filecoin-project/boost-gfm/storagemarket" - "github.com/filecoin-project/boost-gfm/stores" - "github.com/filecoin-project/boost/node/modules/dtypes" - "github.com/filecoin-project/dagstore" - lotus_gfm_piecestore "github.com/filecoin-project/go-fil-markets/piecestore" - "github.com/filecoin-project/go-fil-markets/shared" - lotus_gfm_storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket" - mdagstore "github.com/filecoin-project/lotus/markets/dagstore" - lotus_dtypes "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/ipfs/go-cid" - "github.com/ipld/go-car/v2/index" -) - -func NewBoostGFMDAGStoreWrapper(w *mdagstore.Wrapper) stores.DAGStoreWrapper { - return &boostDagstoreWrapper{w: w} -} - -type boostDagstoreWrapper struct { - w *mdagstore.Wrapper -} - -func (b *boostDagstoreWrapper) RegisterShard(ctx context.Context, pieceCid cid.Cid, carPath string, eagerInit bool, resch chan dagstore.ShardResult) error { - return b.w.RegisterShard(ctx, pieceCid, carPath, eagerInit, resch) -} - -func (b *boostDagstoreWrapper) LoadShard(ctx context.Context, 
pieceCid cid.Cid) (stores.ClosableBlockstore, error) { - return b.w.LoadShard(ctx, pieceCid) -} - -func (b *boostDagstoreWrapper) MigrateDeals(ctx context.Context, deals []storagemarket.MinerDeal) (bool, error) { - dls := make([]lotus_gfm_storagemarket.MinerDeal, 0, len(deals)) - for _, d := range deals { - dls = append(dls, toLotusGFMMinerDeal(d)) - } - return b.w.MigrateDeals(ctx, dls) -} - -func (b *boostDagstoreWrapper) GetPiecesContainingBlock(blockCID cid.Cid) ([]cid.Cid, error) { - return b.w.GetPiecesContainingBlock(blockCID) -} - -func (b *boostDagstoreWrapper) GetIterableIndexForPiece(pieceCid cid.Cid) (index.IterableIndex, error) { - return b.w.GetIterableIndexForPiece(pieceCid) -} - -func (b *boostDagstoreWrapper) DestroyShard(ctx context.Context, pieceCid cid.Cid, resch chan dagstore.ShardResult) error { - return b.w.DestroyShard(ctx, pieceCid, resch) -} - -func (b *boostDagstoreWrapper) Close() error { - return b.w.Close() -} - -func NewLotusGFMProviderPieceStore(ps dtypes.ProviderPieceStore) lotus_dtypes.ProviderPieceStore { - return &lotusProviderPieceStore{ProviderPieceStore: ps} -} - -type lotusProviderPieceStore struct { - dtypes.ProviderPieceStore -} - -var _ lotus_dtypes.ProviderPieceStore = (*lotusProviderPieceStore)(nil) - -func (l *lotusProviderPieceStore) OnReady(ready shared.ReadyFunc) { - if ready == nil { - return - } - l.ProviderPieceStore.OnReady(func(err error) { - ready(err) - }) -} - -func (l *lotusProviderPieceStore) AddDealForPiece(pieceCID cid.Cid, payloadCid cid.Cid, dealInfo lotus_gfm_piecestore.DealInfo) error { - return l.ProviderPieceStore.AddDealForPiece(pieceCID, payloadCid, piecestore.DealInfo{ - DealID: dealInfo.DealID, - SectorID: dealInfo.SectorID, - Offset: dealInfo.Offset, - Length: dealInfo.Length, - }) -} - -func (l *lotusProviderPieceStore) AddPieceBlockLocations(pieceCID cid.Cid, blockLocations map[cid.Cid]lotus_gfm_piecestore.BlockLocation) error { - bls := make(map[cid.Cid]piecestore.BlockLocation, len(blockLocations)) - for c, bl := range blockLocations { - bls[c] = piecestore.BlockLocation{ - RelOffset: bl.RelOffset, - BlockSize: bl.BlockSize, - } - } - return l.ProviderPieceStore.AddPieceBlockLocations(pieceCID, bls) -} - -func (l *lotusProviderPieceStore) GetPieceInfo(pieceCID cid.Cid) (lotus_gfm_piecestore.PieceInfo, error) { - pi, err := l.ProviderPieceStore.GetPieceInfo(pieceCID) - if err != nil { - return lotus_gfm_piecestore.PieceInfo{}, err - } - dls := make([]lotus_gfm_piecestore.DealInfo, 0, len(pi.Deals)) - for _, d := range pi.Deals { - dls = append(dls, lotus_gfm_piecestore.DealInfo{ - DealID: d.DealID, - SectorID: d.SectorID, - Offset: d.Offset, - Length: d.Length, - }) - } - return lotus_gfm_piecestore.PieceInfo{ - PieceCID: pi.PieceCID, - Deals: dls, - }, nil -} - -func (l *lotusProviderPieceStore) GetCIDInfo(payloadCID cid.Cid) (lotus_gfm_piecestore.CIDInfo, error) { - ci, err := l.ProviderPieceStore.GetCIDInfo(payloadCID) - if err != nil { - return lotus_gfm_piecestore.CIDInfo{}, err - } - - bls := make([]lotus_gfm_piecestore.PieceBlockLocation, 0, len(ci.PieceBlockLocations)) - for _, bl := range ci.PieceBlockLocations { - bls = append(bls, lotus_gfm_piecestore.PieceBlockLocation{ - BlockLocation: lotus_gfm_piecestore.BlockLocation{ - RelOffset: bl.BlockLocation.RelOffset, - BlockSize: bl.BlockLocation.BlockSize, - }, - PieceCID: bl.PieceCID, - }) - } - return lotus_gfm_piecestore.CIDInfo{ - CID: ci.CID, - PieceBlockLocations: bls, - }, nil -} diff --git a/node/modules/storageminer_idxprov.go 
b/node/modules/storageminer_idxprov.go index ac4c40751..401bdcb99 100644 --- a/node/modules/storageminer_idxprov.go +++ b/node/modules/storageminer_idxprov.go @@ -3,28 +3,20 @@ package modules import ( "context" "fmt" + "github.com/filecoin-project/boost/build" "github.com/filecoin-project/boost/indexprovider" "github.com/filecoin-project/boost/node/config" "github.com/filecoin-project/boost/node/modules/dtypes" - "github.com/filecoin-project/boost/retrievalmarket/types" "github.com/filecoin-project/boost/util" "github.com/filecoin-project/go-address" - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-data-transfer/transport/graphsync" - datatransferv2 "github.com/filecoin-project/go-data-transfer/v2" lotus_dtypes "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/namespace" - "github.com/ipld/go-ipld-prime" - "github.com/ipld/go-ipld-prime/datamodel" - "github.com/ipni/go-libipni/dagsync/dtsync" provider "github.com/ipni/index-provider" "github.com/ipni/index-provider/engine" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/peer" "go.uber.org/fx" "golang.org/x/xerrors" ) @@ -36,14 +28,14 @@ type IdxProv struct { Datastore lotus_dtypes.MetadataDS } -func IndexProvider(cfg config.IndexProviderConfig) func(params IdxProv, marketHost host.Host, dt dtypes.ProviderDataTransfer, maddr lotus_dtypes.MinerAddress, ps *pubsub.PubSub, nn lotus_dtypes.NetworkName) (provider.Interface, error) { +func IndexProvider(cfg config.IndexProviderConfig) func(params IdxProv, marketHost host.Host, maddr lotus_dtypes.MinerAddress, ps *pubsub.PubSub, nn lotus_dtypes.NetworkName) (provider.Interface, error) { if !cfg.Enable { log.Warnf("Starting Boost with index provider disabled - no announcements will be made to the index provider") - return func(params IdxProv, marketHost host.Host, dt dtypes.ProviderDataTransfer, maddr lotus_dtypes.MinerAddress, ps *pubsub.PubSub, nn lotus_dtypes.NetworkName) (provider.Interface, error) { + return func(params IdxProv, marketHost host.Host, maddr lotus_dtypes.MinerAddress, ps *pubsub.PubSub, nn lotus_dtypes.NetworkName) (provider.Interface, error) { return indexprovider.NewDisabledIndexProvider(), nil } } - return func(args IdxProv, marketHost host.Host, dt dtypes.ProviderDataTransfer, maddr lotus_dtypes.MinerAddress, ps *pubsub.PubSub, nn lotus_dtypes.NetworkName) (provider.Interface, error) { + return func(args IdxProv, marketHost host.Host, maddr lotus_dtypes.MinerAddress, ps *pubsub.PubSub, nn lotus_dtypes.NetworkName) (provider.Interface, error) { topicName := cfg.TopicName // If indexer topic name is left empty, infer it from the network name. if topicName == "" { @@ -102,17 +94,8 @@ func IndexProvider(cfg config.IndexProviderConfig) func(params IdxProv, marketHo opts = append(opts, engine.WithDirectAnnounce(cfg.Announce.DirectAnnounceURLs...)) } - // Advertisements can be served over HTTP, HTTP over libp2p of over - // the data transfer protocol (on graphsync). - if cfg.DataTransferPublisher { - opts = append(opts, - engine.WithPublisherKind(engine.DataTransferPublisher), - engine.WithDataTransfer(dtV1ToIndexerDT(dt, func() ipld.LinkSystem { - return *e.LinkSystem() - })), - ) - llog = llog.With("extraGossipData", ma, "publisher", "data-transfer") - } else if cfg.HttpPublisher.Enabled { + // Advertisements can be served over HTTP or HTTP over libp2p. 
+ if cfg.HttpPublisher.Enabled { announceAddr, err := util.ToHttpMultiaddr(cfg.HttpPublisher.PublicHostname, cfg.HttpPublisher.Port) if err != nil { return nil, fmt.Errorf("parsing HTTP Publisher hostname '%s' / port %d: %w", @@ -130,7 +113,7 @@ func IndexProvider(cfg config.IndexProviderConfig) func(params IdxProv, marketHo llog = llog.With("publisher", "http and libp2phttp", "announceAddr", announceAddr, "extraGossipData", ma) } } else { - // HTTP publisher not enabled, so use only libp2p unless using DataTransferPublisher. + // HTTP publisher not enabled, so use only libp2p opts = append(opts, engine.WithPublisherKind(engine.Libp2pPublisher)) llog = llog.With("publisher", "libp2phttp", "extraGossipData", ma) } @@ -168,156 +151,3 @@ func IndexProvider(cfg config.IndexProviderConfig) func(params IdxProv, marketHo return e, nil } } - -// The index provider needs to set up some go-data-transfer voucher code. -// Below we write a shim for the specific use case of index provider, that -// translates between the go-data-transfer v2 use case that the index provider -// implements and the go-data-transfer v1 code that boost imports. -func dtV1ToIndexerDT(dt dtypes.ProviderDataTransfer, linksys func() ipld.LinkSystem) datatransferv2.Manager { - return &indexerDT{dt: dt, linksys: linksys} -} - -type indexerDT struct { - dt dtypes.ProviderDataTransfer - linksys func() ipld.LinkSystem -} - -var _ datatransferv2.Manager = (*indexerDT)(nil) - -func (i *indexerDT) RegisterVoucherType(voucherType datatransferv2.TypeIdentifier, validator datatransferv2.RequestValidator) error { - if voucherType == dtsync.LegsVoucherType { - return i.dt.RegisterVoucherType(&types.LegsVoucherDTv1{}, &dtv1ReqValidator{v: validator}) - } - return fmt.Errorf("unrecognized voucher type: %s", voucherType) -} - -func (i *indexerDT) RegisterTransportConfigurer(voucherType datatransferv2.TypeIdentifier, configurer datatransferv2.TransportConfigurer) error { - if voucherType == dtsync.LegsVoucherType { - return i.dt.RegisterTransportConfigurer(&types.LegsVoucherDTv1{}, func(chid datatransfer.ChannelID, voucher datatransfer.Voucher, transport datatransfer.Transport) { - gsTransport, ok := transport.(*graphsync.Transport) - if ok { - err := gsTransport.UseStore(chid, i.linksys()) - if err != nil { - log.Warnf("setting store for legs voucher: %s", err) - } - } else { - log.Warnf("expected transport configurer to pass graphsync transport but got %T", transport) - } - }) - } - return fmt.Errorf("unrecognized voucher type: %s", voucherType) -} - -func (i *indexerDT) Start(ctx context.Context) error { - return fmt.Errorf("not implemented") -} - -func (i *indexerDT) OnReady(readyFunc datatransferv2.ReadyFunc) { -} - -func (i *indexerDT) Stop(ctx context.Context) error { - return fmt.Errorf("not implemented") -} - -func (i *indexerDT) OpenPushDataChannel(ctx context.Context, to peer.ID, voucher datatransferv2.TypedVoucher, baseCid cid.Cid, selector datamodel.Node, options ...datatransferv2.TransferOption) (datatransferv2.ChannelID, error) { - return datatransferv2.ChannelID{}, fmt.Errorf("not implemented") -} - -func (i *indexerDT) OpenPullDataChannel(ctx context.Context, to peer.ID, voucher datatransferv2.TypedVoucher, baseCid cid.Cid, selector datamodel.Node, options ...datatransferv2.TransferOption) (datatransferv2.ChannelID, error) { - return datatransferv2.ChannelID{}, fmt.Errorf("not implemented") -} - -func (i *indexerDT) SendVoucher(ctx context.Context, chid datatransferv2.ChannelID, voucher datatransferv2.TypedVoucher) error { - 
return fmt.Errorf("not implemented") -} - -func (i *indexerDT) SendVoucherResult(ctx context.Context, chid datatransferv2.ChannelID, voucherResult datatransferv2.TypedVoucher) error { - return fmt.Errorf("not implemented") -} - -func (i *indexerDT) UpdateValidationStatus(ctx context.Context, chid datatransferv2.ChannelID, validationResult datatransferv2.ValidationResult) error { - return fmt.Errorf("not implemented") -} - -func (i *indexerDT) CloseDataTransferChannel(ctx context.Context, chid datatransferv2.ChannelID) error { - return fmt.Errorf("not implemented") -} - -func (i *indexerDT) PauseDataTransferChannel(ctx context.Context, chid datatransferv2.ChannelID) error { - return fmt.Errorf("not implemented") -} - -func (i *indexerDT) ResumeDataTransferChannel(ctx context.Context, chid datatransferv2.ChannelID) error { - return fmt.Errorf("not implemented") -} - -func (i *indexerDT) TransferChannelStatus(ctx context.Context, x datatransferv2.ChannelID) datatransferv2.Status { - return 0 -} - -func (i *indexerDT) ChannelState(ctx context.Context, chid datatransferv2.ChannelID) (datatransferv2.ChannelState, error) { - return nil, fmt.Errorf("not implemented") -} - -func (i *indexerDT) SubscribeToEvents(subscriber datatransferv2.Subscriber) datatransferv2.Unsubscribe { - return func() {} -} - -func (i *indexerDT) InProgressChannels(ctx context.Context) (map[datatransferv2.ChannelID]datatransferv2.ChannelState, error) { - return nil, fmt.Errorf("not implemented") -} - -func (i *indexerDT) RestartDataTransferChannel(ctx context.Context, chid datatransferv2.ChannelID) error { - return fmt.Errorf("not implemented") -} - -type dtv1ReqValidator struct { - v datatransferv2.RequestValidator -} - -func (d *dtv1ReqValidator) ValidatePush(isRestart bool, chid datatransfer.ChannelID, sender peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) (datatransfer.VoucherResult, error) { - d2v := dtsync.BindnodeRegistry.TypeToNode(&voucher.(*types.LegsVoucherDTv1).Voucher) - res, err := d.v.ValidatePush(toChannelIDV2(chid), sender, d2v, baseCid, selector) - if err != nil { - return nil, err - } - if !res.Accepted { - return nil, datatransfer.ErrRejected - } - - return toVoucherResult(res) -} - -func (d *dtv1ReqValidator) ValidatePull(isRestart bool, chid datatransfer.ChannelID, receiver peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) (datatransfer.VoucherResult, error) { - d2v := dtsync.BindnodeRegistry.TypeToNode(&voucher.(*types.LegsVoucherDTv1).Voucher) - res, err := d.v.ValidatePull(toChannelIDV2(chid), receiver, d2v, baseCid, selector) - if err != nil { - return nil, err - } - if !res.Accepted { - return nil, datatransfer.ErrRejected - } - - return toVoucherResult(res) -} - -func toVoucherResult(res datatransferv2.ValidationResult) (datatransfer.VoucherResult, error) { - voucherResVoucher := res.VoucherResult.Voucher - vri, err := dtsync.BindnodeRegistry.TypeFromNode(voucherResVoucher, &dtsync.VoucherResult{}) - if err != nil { - return nil, fmt.Errorf("getting VoucherResult from ValidationResult: %w", err) - } - vr := vri.(*dtsync.VoucherResult) - if vr == nil { - return nil, fmt.Errorf("got nil VoucherResult from ValidationResult") - } - return &types.LegsVoucherResultDtv1{VoucherResult: *vr, VoucherType: res.VoucherResult.Type}, nil -} - -func toChannelIDV2(chid datatransfer.ChannelID) datatransferv2.ChannelID { - return datatransferv2.ChannelID{ - Initiator: chid.Initiator, - Responder: chid.Responder, - ID: datatransferv2.TransferID(chid.ID), - } -} 
diff --git a/retrievalmarket/client/client.go b/retrievalmarket/client/client.go index 982c28414..78ca1f28e 100644 --- a/retrievalmarket/client/client.go +++ b/retrievalmarket/client/client.go @@ -9,23 +9,24 @@ import ( "sync" "time" - "github.com/filecoin-project/boost-gfm/retrievalmarket" - "github.com/filecoin-project/boost-gfm/shared" gsimpl "github.com/filecoin-project/boost-graphsync/impl" gsnet "github.com/filecoin-project/boost-graphsync/network" "github.com/filecoin-project/boost-graphsync/storeutil" + datatransfer2 "github.com/filecoin-project/boost/datatransfer" + dtnet "github.com/filecoin-project/boost/datatransfer/network" + gst "github.com/filecoin-project/boost/datatransfer/transport/graphsync" + "github.com/filecoin-project/boost/markets/shared" + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" "github.com/filecoin-project/go-address" cborutil "github.com/filecoin-project/go-cbor-util" - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-data-transfer/channelmonitor" - dtimpl "github.com/filecoin-project/go-data-transfer/impl" - dtnet "github.com/filecoin-project/go-data-transfer/network" - gst "github.com/filecoin-project/go-data-transfer/transport/graphsync" + + "github.com/filecoin-project/boost/datatransfer/channelmonitor" + dtimpl "github.com/filecoin-project/boost/datatransfer/impl" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/wallet" - blockstore "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/blockstore" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" logging "github.com/ipfs/go-log/v2" @@ -61,7 +62,7 @@ type Client struct { host host.Host ClientAddr address.Address blockstore blockstore.Blockstore - dataTransfer datatransfer.Manager + dataTransfer datatransfer2.Manager logRetrievalProgressEvents bool } @@ -76,7 +77,7 @@ type Config struct { Datastore datastore.Batching Host host.Host ChannelMonitorConfig channelmonitor.Config - RetrievalConfigurer datatransfer.TransportConfigurer + RetrievalConfigurer datatransfer2.TransportConfigurer LogRetrievalProgressEvents bool } @@ -138,23 +139,23 @@ func NewClientWithConfig(cfg *Config) (*Client, error) { return nil, err } - err = mgr.RegisterVoucherType(&retrievalmarket.DealProposal{}, nil) + err = mgr.RegisterVoucherType(&legacyretrievaltypes.DealProposal{}, nil) if err != nil { return nil, err } - err = mgr.RegisterVoucherType(&retrievalmarket.DealPayment{}, nil) + err = mgr.RegisterVoucherType(&legacyretrievaltypes.DealPayment{}, nil) if err != nil { return nil, err } - err = mgr.RegisterVoucherResultType(&retrievalmarket.DealResponse{}) + err = mgr.RegisterVoucherResultType(&legacyretrievaltypes.DealResponse{}) if err != nil { return nil, err } if cfg.RetrievalConfigurer != nil { - if err := mgr.RegisterTransportConfigurer(&retrievalmarket.DealProposal{}, cfg.RetrievalConfigurer); err != nil { + if err := mgr.RegisterTransportConfigurer(&legacyretrievaltypes.DealProposal{}, cfg.RetrievalConfigurer); err != nil { return nil, err } } @@ -297,7 +298,7 @@ func doRpc(ctx context.Context, s inet.Stream, req interface{}, resp interface{} return nil } -func (c *Client) RetrievalQuery(ctx context.Context, maddr address.Address, pcid cid.Cid) (*retrievalmarket.QueryResponse, error) { +func (c *Client) RetrievalQuery(ctx context.Context, maddr address.Address, pcid cid.Cid) 
(*legacyretrievaltypes.QueryResponse, error) { ctx, span := Tracer.Start(ctx, "retrievalQuery", trace.WithAttributes( attribute.Stringer("miner", maddr), )) @@ -316,11 +317,11 @@ func (c *Client) RetrievalQuery(ctx context.Context, maddr address.Address, pcid // We have connected - q := &retrievalmarket.Query{ + q := &legacyretrievaltypes.Query{ PayloadCID: pcid, } - var resp retrievalmarket.QueryResponse + var resp legacyretrievaltypes.QueryResponse if err := doRpc(ctx, s, q, &resp); err != nil { return nil, fmt.Errorf("retrieval query rpc: %w", err) } @@ -341,7 +342,7 @@ type RetrievalStats struct { func (c *Client) RetrieveContentWithProgressCallback( ctx context.Context, miner address.Address, - proposal *retrievalmarket.DealProposal, + proposal *legacyretrievaltypes.DealProposal, progressCallback func(bytesReceived uint64), ) (*RetrievalStats, error) { @@ -362,7 +363,7 @@ func (c *Client) retrieveContentFromPeerWithProgressCallback( ctx context.Context, peerID peer.ID, minerWallet address.Address, - proposal *retrievalmarket.DealProposal, + proposal *legacyretrievaltypes.DealProposal, progressCallback func(bytesReceived uint64), gracefulShutdownRequested <-chan struct{}, ) (*RetrievalStats, error) { @@ -380,7 +381,7 @@ func (c *Client) retrieveContentFromPeerWithProgressCallback( totalPayment := abi.NewTokenAmount(0) rootCid := proposal.PayloadCID - var chanid datatransfer.ChannelID + var chanid datatransfer2.ChannelID var chanidLk sync.Mutex pchRequired := !proposal.PricePerByte.IsZero() || !proposal.UnsealPrice.IsZero() @@ -409,8 +410,8 @@ func (c *Client) retrieveContentFromPeerWithProgressCallback( dealComplete := false receivedFirstByte := false - unsubscribe := c.dataTransfer.SubscribeToEvents(func(event datatransfer.Event, state datatransfer.ChannelState) { - // Copy chanid so it can be used later in the callback + unsubscribe := c.dataTransfer.SubscribeToEvents(func(event datatransfer2.Event, state datatransfer2.ChannelState) { + // Copy chanid so it can be used later in the callback chanidLk.Lock() chanidCopy := chanid chanidLk.Unlock() @@ -424,24 +425,24 @@ func (c *Client) retrieveContentFromPeerWithProgressCallback( eventCodeNotHandled := false switch event.Code { - case datatransfer.Open: - case datatransfer.Accept: - case datatransfer.Restart: - case datatransfer.DataReceived: + case datatransfer2.Open: + case datatransfer2.Accept: + case datatransfer2.Restart: + case datatransfer2.DataReceived: silenceEventCode = true - case datatransfer.DataSent: - case datatransfer.Cancel: - case datatransfer.Error: + case datatransfer2.DataSent: + case datatransfer2.Cancel: + case datatransfer2.Error: finish(fmt.Errorf("datatransfer error: %s", event.Message)) return - case datatransfer.CleanupComplete: + case datatransfer2.CleanupComplete: finish(nil) return - case datatransfer.NewVoucher: - case datatransfer.NewVoucherResult: + case datatransfer2.NewVoucher: + case datatransfer2.NewVoucherResult: switch resType := state.LastVoucherResult().(type) { - case *retrievalmarket.DealResponse: + case *legacyretrievaltypes.DealResponse: if len(resType.Message) != 0 { log.Debugf("Received deal response voucher result %s (%v): %s\n\t%+v", resType.Status, resType.Status, resType.Message, resType) } else { @@ -449,11 +450,11 @@ func (c *Client) retrieveContentFromPeerWithProgressCallback( } switch resType.Status { - case retrievalmarket.DealStatusAccepted: + case legacyretrievaltypes.DealStatusAccepted: log.Info("Deal accepted") // Respond with a payment voucher when funds are requested - case
retrievalmarket.DealStatusFundsNeeded, retrievalmarket.DealStatusFundsNeededLastPayment: + case legacyretrievaltypes.DealStatusFundsNeeded, legacyretrievaltypes.DealStatusFundsNeededLastPayment: if pchRequired { finish(errors.New("payment channel required")) return @@ -461,19 +462,19 @@ func (c *Client) retrieveContentFromPeerWithProgressCallback( finish(fmt.Errorf("the miner requested payment even though this transaction was determined to be zero cost")) return } - case retrievalmarket.DealStatusRejected: + case legacyretrievaltypes.DealStatusRejected: finish(fmt.Errorf("deal rejected: %s", resType.Message)) return - case retrievalmarket.DealStatusFundsNeededUnseal, retrievalmarket.DealStatusUnsealing: + case legacyretrievaltypes.DealStatusFundsNeededUnseal, legacyretrievaltypes.DealStatusUnsealing: finish(fmt.Errorf("data is sealed")) return - case retrievalmarket.DealStatusCancelled: + case legacyretrievaltypes.DealStatusCancelled: finish(fmt.Errorf("deal cancelled: %s", resType.Message)) return - case retrievalmarket.DealStatusErrored: + case legacyretrievaltypes.DealStatusErrored: finish(fmt.Errorf("deal errored: %s", resType.Message)) return - case retrievalmarket.DealStatusCompleted: + case legacyretrievaltypes.DealStatusCompleted: if allBytesReceived { finish(nil) return @@ -481,26 +482,26 @@ func (c *Client) retrieveContentFromPeerWithProgressCallback( dealComplete = true } } - case datatransfer.PauseInitiator: - case datatransfer.ResumeInitiator: - case datatransfer.PauseResponder: - case datatransfer.ResumeResponder: - case datatransfer.FinishTransfer: + case datatransfer2.PauseInitiator: + case datatransfer2.ResumeInitiator: + case datatransfer2.PauseResponder: + case datatransfer2.ResumeResponder: + case datatransfer2.FinishTransfer: if dealComplete { finish(nil) return } allBytesReceived = true - case datatransfer.ResponderCompletes: - case datatransfer.ResponderBeginsFinalization: - case datatransfer.BeginFinalizing: - case datatransfer.Disconnected: - case datatransfer.Complete: - case datatransfer.CompleteCleanupOnRestart: - case datatransfer.DataQueued: - case datatransfer.DataQueuedProgress: - case datatransfer.DataSentProgress: - case datatransfer.DataReceivedProgress: + case datatransfer2.ResponderCompletes: + case datatransfer2.ResponderBeginsFinalization: + case datatransfer2.BeginFinalizing: + case datatransfer2.Disconnected: + case datatransfer2.Complete: + case datatransfer2.CompleteCleanupOnRestart: + case datatransfer2.DataQueued: + case datatransfer2.DataQueuedProgress: + case datatransfer2.DataSentProgress: + case datatransfer2.DataReceivedProgress: // First byte has been received // publish first byte event @@ -510,17 +511,17 @@ func (c *Client) retrieveContentFromPeerWithProgressCallback( progressCallback(state.Received()) silenceEventCode = true - case datatransfer.RequestTimedOut: - case datatransfer.SendDataError: - case datatransfer.ReceiveDataError: - case datatransfer.TransferRequestQueued: - case datatransfer.RequestCancelled: - case datatransfer.Opened: + case datatransfer2.RequestTimedOut: + case datatransfer2.SendDataError: + case datatransfer2.ReceiveDataError: + case datatransfer2.TransferRequestQueued: + case datatransfer2.RequestCancelled: + case datatransfer2.Opened: default: eventCodeNotHandled = true } - name := datatransfer.Events[event.Code] + name := datatransfer2.Events[event.Code] code := event.Code msg := event.Message blocksIndex := state.ReceivedCidsTotal() @@ -602,12 +603,12 @@ awaitfinished: }, nil } -func 
RetrievalProposalForAsk(ask *retrievalmarket.QueryResponse, c cid.Cid, optionalSelector ipld.Node) (*retrievalmarket.DealProposal, error) { +func RetrievalProposalForAsk(ask *legacyretrievaltypes.QueryResponse, c cid.Cid, optionalSelector ipld.Node) (*legacyretrievaltypes.DealProposal, error) { if optionalSelector == nil { optionalSelector = selectorparse.CommonSelector_ExploreAllRecursively } - params, err := retrievalmarket.NewParamsV1( + params, err := legacyretrievaltypes.NewParamsV1( ask.MinPricePerByte, ask.MaxPaymentInterval, ask.MaxPaymentIntervalIncrease, @@ -618,9 +619,9 @@ func RetrievalProposalForAsk(ask *retrievalmarket.QueryResponse, c cid.Cid, opti if err != nil { return nil, err } - return &retrievalmarket.DealProposal{ + return &legacyretrievaltypes.DealProposal{ PayloadCID: c, - ID: retrievalmarket.DealID(dealIdGen.Next()), + ID: legacyretrievaltypes.DealID(dealIdGen.Next()), Params: params, }, nil } diff --git a/retrievalmarket/lib/idxciddagstore.go b/retrievalmarket/lib/idxciddagstore.go deleted file mode 100644 index 6f651d1f4..000000000 --- a/retrievalmarket/lib/idxciddagstore.go +++ /dev/null @@ -1,72 +0,0 @@ -package lib - -import ( - "context" - "fmt" - "github.com/filecoin-project/boost/retrievalmarket/server" - "github.com/filecoin-project/dagstore" - "github.com/filecoin-project/dagstore/indexbs" - "github.com/filecoin-project/dagstore/shard" - "github.com/ipfs/go-cid" - "github.com/multiformats/go-multihash" -) - -// IndexBackedBlockstoreDagstore implements the dagstore interface needed -// by the IndexBackedBlockstore. -// The implementation of ShardsContainingCid handles identity cids. -type IndexBackedBlockstoreDagstore struct { - dagstore.Interface -} - -var _ dagstore.Interface = (*IndexBackedBlockstoreDagstore)(nil) - -func NewIndexBackedBlockstoreDagstore(ds dagstore.Interface) indexbs.IdxBstoreDagstore { - return &IndexBackedBlockstoreDagstore{Interface: ds} -} - -// ShardsContainingCid checks the db for shards containing the given cid. -// If there are no shards with that cid, it checks if the shard is an identity -// cid, and gets the shards containing the identity cid's child cids. -// This is for the case where the identity cid was not stored in the original -// CAR file's index (but the identity cid's child cids are in the index). 
-func (i *IndexBackedBlockstoreDagstore) ShardsContainingCid(ctx context.Context, c cid.Cid) ([]shard.Key, error) { - shards, err := i.Interface.ShardsContainingMultihash(ctx, c.Hash()) - if err == nil { - return shards, nil - } - - var idErr error - piecesWithTargetBlock, idErr := server.GetCommonPiecesFromIdentityCidLinks(ctx, func(ctx context.Context, mh multihash.Multihash) ([]cid.Cid, error) { - return i.piecesContainingBlock(ctx, mh) - }, c) - if idErr != nil { - return nil, fmt.Errorf("getting common pieces for cid %s: %w", c, idErr) - } - if len(piecesWithTargetBlock) == 0 { - // No pieces found for cid: return the original error from the call to - // ShardsContainingMultihash above - return nil, fmt.Errorf("getting pieces for cid %s: %w", c, err) - } - - shards = make([]shard.Key, 0, len(piecesWithTargetBlock)) - for _, pcid := range piecesWithTargetBlock { - shards = append(shards, shard.KeyFromCID(pcid)) - } - return shards, nil -} - -func (i *IndexBackedBlockstoreDagstore) piecesContainingBlock(ctx context.Context, mh multihash.Multihash) ([]cid.Cid, error) { - shards, err := i.Interface.ShardsContainingMultihash(ctx, mh) - if err != nil { - return nil, fmt.Errorf("finding shards containing child mh %s: %w", mh, err) - } - pcids := make([]cid.Cid, 0, len(shards)) - for _, s := range shards { - pcid, err := cid.Parse(s.String()) - if err != nil { - return nil, fmt.Errorf("parsing shard into cid: %w", err) - } - pcids = append(pcids, pcid) - } - return pcids, nil -} diff --git a/retrievalmarket/lib/shardselector.go b/retrievalmarket/lib/shardselector.go deleted file mode 100644 index 2fe71c529..000000000 --- a/retrievalmarket/lib/shardselector.go +++ /dev/null @@ -1,181 +0,0 @@ -package lib - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/filecoin-project/boost-gfm/piecestore" - "github.com/filecoin-project/boost-gfm/retrievalmarket" - "github.com/filecoin-project/dagstore/indexbs" - "github.com/filecoin-project/dagstore/shard" - "github.com/filecoin-project/go-state-types/abi" - lru "github.com/hnlq715/golang-lru" - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" -) - -var sslog = logging.Logger("shardselect") - -// ShardSelector is used by the dagstore's index-backed blockstore to select -// the best shard from which to retrieve a particular cid. -// It chooses the first shard that is unsealed and free (zero cost). -// It caches the results per-shard. 
-type ShardSelector struct { - ctx context.Context - ps piecestore.PieceStore - sa retrievalmarket.SectorAccessor - rp retrievalmarket.RetrievalProvider - - // The striped lock protects against multiple threads doing a lookup - // against the sealing subsystem / retrieval ask for the same shard - stripedLock [256]sync.Mutex - cache *lru.Cache -} - -func NewShardSelector(ctx context.Context, ps piecestore.PieceStore, sa retrievalmarket.SectorAccessor, rp retrievalmarket.RetrievalProvider) (*ShardSelector, error) { - cache, err := lru.New(2048) - if err != nil { - return nil, fmt.Errorf("creating shard selector cache: %w", err) - } - return &ShardSelector{ctx: ctx, ps: ps, sa: sa, rp: rp, cache: cache}, nil -} - -var selectorCacheDuration = 10 * time.Minute -var selectorCacheErrorDuration = time.Minute - -type shardSelectResult struct { - available bool - err error -} - -// ShardSelectorF chooses the first shard that is unsealed and free (zero cost) -func (s *ShardSelector) ShardSelectorF(c cid.Cid, shards []shard.Key) (shard.Key, error) { - // If no shards are selected, return ErrNoShardSelected - lastErr := indexbs.ErrNoShardSelected - - sslog.Debugw("shard selection", "shards", shards) - for _, sk := range shards { - lkidx := s.stripedLockIndex(sk) - s.stripedLock[lkidx].Lock() - available, err := s.isAvailable(sk) - s.stripedLock[lkidx].Unlock() - - if available { - // We found an available shard, return it - sslog.Debugw("shard selected", "shard", sk) - return sk, nil - } - if err != nil { - sslog.Debugw("shard error", "shard", sk, "err", err) - lastErr = err - } - } - - // None of the shards are available - sslog.Debugw("no shard selected", "shards", shards, "err", lastErr) - return shard.Key{}, lastErr -} - -func (s *ShardSelector) isAvailable(sk shard.Key) (bool, error) { - // Check if the shard key is in the cache - var res *shardSelectResult - resi, cached := s.cache.Get(sk) - if cached { - res = resi.(*shardSelectResult) - sslog.Debugw("shard cache hit", "shard", sk) - return res.available, res.err - } - sslog.Debugw("shard cache miss", "shard", sk) - - // Check if the shard is available - res = &shardSelectResult{} - res.available, res.err = s.checkIsAvailable(sk) - expireIn := selectorCacheDuration - if res.err != nil { - // If there's an error, cache for a short duration so that we - // don't wait too long to try again. 
- expireIn = selectorCacheErrorDuration - res.available = false - res.err = fmt.Errorf("running shard selection for shard %s: %w", sk, res.err) - sslog.Warnw("checking shard availability", "shard", sk, "err", res.err) - } - // Add the result to the cache - s.cache.AddEx(sk, res, expireIn) - - return res.available, res.err -} - -func (s *ShardSelector) checkIsAvailable(sk shard.Key) (bool, error) { - // Parse piece CID - pieceCid, err := cid.Parse(sk.String()) - if err != nil { - return false, fmt.Errorf("parsing shard key as cid: %w", err) - } - - // Read piece info from piece store - sslog.Debugw("getting piece info", "shard", sk) - pieceInfo, err := s.ps.GetPieceInfo(pieceCid) - if err != nil { - return false, fmt.Errorf("get piece info: %w", err) - } - - // Filter for deals that are unsealed - sslog.Debugw("filtering for unsealed deals", "shard", sk, "deals", len(pieceInfo.Deals)) - unsealedDeals := make([]piecestore.DealInfo, 0, len(pieceInfo.Deals)) - var lastErr error - for _, di := range pieceInfo.Deals { - isUnsealed, err := s.sa.IsUnsealed(s.ctx, di.SectorID, di.Offset.Unpadded(), di.Length.Unpadded()) - if err != nil { - sslog.Warnf("checking if sector is unsealed", "shard", sk, "sector", di.SectorID, sk, "err", err) - lastErr = err - continue - } - - if isUnsealed { - sslog.Debugw("sector is unsealed", "shard", sk, "sector", di.SectorID) - unsealedDeals = append(unsealedDeals, di) - } else { - sslog.Debugw("sector is sealed", "shard", sk, "sector", di.SectorID) - } - } - - if len(unsealedDeals) == 0 { - // It wasn't possible to find an unsealed sector - sslog.Debugw("no unsealed deals found", "shard", sk) - return false, lastErr - } - - // Check if the piece is available for free (zero-cost) retrieval - input := retrievalmarket.PricingInput{ - // Piece from which the payload will be retrieved - PieceCID: pieceInfo.PieceCID, - Unsealed: true, - } - - var dealsIds []abi.DealID - for _, d := range unsealedDeals { - dealsIds = append(dealsIds, d.DealID) - } - - sslog.Debugw("getting dynamic asking price for unsealed deals", "shard", sk, "deals", len(unsealedDeals)) - ask, err := s.rp.GetDynamicAsk(s.ctx, input, dealsIds) - if err != nil { - return false, fmt.Errorf("getting retrieval ask: %w", err) - } - - // The piece is available for free retrieval - if ask.PricePerByte.NilOrZero() { - sslog.Debugw("asking price for unsealed deals is zero", "shard", sk) - return true, nil - } - - sslog.Debugw("asking price-per-byte for unsealed deals is non-zero", "shard", sk, "price", ask.PricePerByte.String()) - return false, nil -} - -func (s *ShardSelector) stripedLockIndex(sk shard.Key) int { - skstr := sk.String() - return int(skstr[len(skstr)-1]) -} diff --git a/retrievalmarket/lib/shardselector_test.go b/retrievalmarket/lib/shardselector_test.go deleted file mode 100644 index 7cbf99927..000000000 --- a/retrievalmarket/lib/shardselector_test.go +++ /dev/null @@ -1,129 +0,0 @@ -package lib - -import ( - "context" - "github.com/filecoin-project/boost-gfm/piecestore" - "github.com/filecoin-project/boost-gfm/retrievalmarket" - "github.com/filecoin-project/boost/retrievalmarket/mock" - "github.com/filecoin-project/boost/testutil" - "github.com/filecoin-project/dagstore/indexbs" - "github.com/filecoin-project/dagstore/shard" - "github.com/filecoin-project/go-state-types/abi" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - "golang.org/x/sync/errgroup" - "testing" -) - -func TestShardSelector(t *testing.T) { - ctx := context.Background() - - testCases := []struct { - 
name string - deals []piecestore.DealInfo - isUnsealed []bool - pricePerByte int64 - expectErr error - }{{ - name: "no deals", - deals: nil, - expectErr: indexbs.ErrNoShardSelected, - }, { - name: "only sealed deals", - deals: []piecestore.DealInfo{{SectorID: 0}, {SectorID: 1}}, - isUnsealed: []bool{false, false}, // index corresponds to sector ID - expectErr: indexbs.ErrNoShardSelected, - }, { - name: "one unsealed deal but non-zero price", - deals: []piecestore.DealInfo{{SectorID: 0}, {SectorID: 1}}, - isUnsealed: []bool{false, true}, // index corresponds to sector ID - pricePerByte: 1, - expectErr: indexbs.ErrNoShardSelected, - }, { - name: "one unsealed deal with zero price", - deals: []piecestore.DealInfo{{SectorID: 0}, {SectorID: 1}}, - isUnsealed: []bool{false, true}, // index corresponds to sector ID - pricePerByte: 0, - expectErr: nil, - }} - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - ctrl := gomock.NewController(t) - pieceStore := mock.NewMockPieceStore(ctrl) - sectorAccessor := mock.NewMockSectorAccessor(ctrl) - retrievalProv := mock.NewMockRetrievalProvider(ctrl) - ss, err := NewShardSelector(ctx, pieceStore, sectorAccessor, retrievalProv) - require.NoError(t, err) - - blockCid := testutil.GenerateCid() - require.NoError(t, err) - sk1cid := testutil.GenerateCid() - sk1 := shard.KeyFromCID(sk1cid) - shards := []shard.Key{sk1} - - pi := piecestore.PieceInfo{ - PieceCID: testutil.GenerateCid(), - Deals: tc.deals, - } - pieceStore.EXPECT().GetPieceInfo(sk1cid).AnyTimes().Return(pi, nil) - - for _, dl := range tc.deals { - isUnsealed := tc.isUnsealed[dl.SectorID] - sectorAccessor.EXPECT().IsUnsealed(gomock.Any(), dl.SectorID, gomock.Any(), gomock.Any()).AnyTimes().Return(isUnsealed, nil) - } - - ask := retrievalmarket.Ask{PricePerByte: abi.NewTokenAmount(tc.pricePerByte)} - retrievalProv.EXPECT().GetDynamicAsk(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(ask, nil) - - sk, err := ss.ShardSelectorF(blockCid, shards) - if tc.expectErr != nil { - require.ErrorIs(t, err, tc.expectErr) - } else { - require.NoError(t, err) - require.Equal(t, sk1, sk) - } - }) - } -} - -func TestShardSelectorCache(t *testing.T) { - ctx := context.Background() - - ctrl := gomock.NewController(t) - pieceStore := mock.NewMockPieceStore(ctrl) - sectorAccessor := mock.NewMockSectorAccessor(ctrl) - retrievalProv := mock.NewMockRetrievalProvider(ctrl) - ss, err := NewShardSelector(ctx, pieceStore, sectorAccessor, retrievalProv) - require.NoError(t, err) - - blockCid := testutil.GenerateCid() - require.NoError(t, err) - sk1cid := testutil.GenerateCid() - sk1 := shard.KeyFromCID(sk1cid) - shards := []shard.Key{sk1} - - pi := piecestore.PieceInfo{ - PieceCID: testutil.GenerateCid(), - Deals: []piecestore.DealInfo{{SectorID: 1}}, - } - pieceStore.EXPECT().GetPieceInfo(sk1cid). - // Expect there to be only one call to GetPieceInfo because after the first - // call the result should be cached - MinTimes(1).MaxTimes(1). 
- Return(pi, nil) - - sectorAccessor.EXPECT().IsUnsealed(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(true, nil) - - ask := retrievalmarket.Ask{PricePerByte: abi.NewTokenAmount(0)} - retrievalProv.EXPECT().GetDynamicAsk(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(ask, nil) - - var wg errgroup.Group - for i := 0; i < 100; i++ { - wg.Go(func() error { - _, err := ss.ShardSelectorF(blockCid, shards) - return err - }) - } - require.NoError(t, wg.Wait()) -} diff --git a/retrievalmarket/lp2pimpl/transports.go b/retrievalmarket/lp2pimpl/transports.go index 623e32e9d..f131f6017 100644 --- a/retrievalmarket/lp2pimpl/transports.go +++ b/retrievalmarket/lp2pimpl/transports.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - "github.com/filecoin-project/boost-gfm/shared" + "github.com/filecoin-project/boost/markets/shared" "github.com/filecoin-project/boost/retrievalmarket/types" logging "github.com/ipfs/go-log/v2" "github.com/ipld/go-ipld-prime/codec/dagcbor" diff --git a/retrievalmarket/mock/gen.go b/retrievalmarket/mock/gen.go index 60a1a8419..8b5d222bc 100644 --- a/retrievalmarket/mock/gen.go +++ b/retrievalmarket/mock/gen.go @@ -1,4 +1,4 @@ package mock -//go:generate go run github.com/golang/mock/mockgen -destination=./piecestore.go -package=mock github.com/filecoin-project/go-fil-markets/piecestore PieceStore -//go:generate go run github.com/golang/mock/mockgen -destination=./retrievalmarket.go -package=mock github.com/filecoin-project/go-fil-markets/retrievalmarket RetrievalProvider,SectorAccessor +//go:generate go run github.com/golang/mock/mockgen -destination=./piecestore.go -package=mock github.com/filecoin-project/boost/node/modules/piecestore PieceStore +//go:generate go run github.com/golang/mock/mockgen -destination=./retrievalmarket.go -package=mock github.com/filecoin-project/Boost/retrievalmarket/legacyretrievaltypes RetrievalProvider,SectorAccessor diff --git a/retrievalmarket/mock/piecestore.go b/retrievalmarket/mock/piecestore.go index daadaf346..383d72c7e 100644 --- a/retrievalmarket/mock/piecestore.go +++ b/retrievalmarket/mock/piecestore.go @@ -1,17 +1,17 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/filecoin-project/go-fil-markets/piecestore (interfaces: PieceStore) +// Source: github.com/filecoin-project/boost/node/modules/piecestore (interfaces: PieceStore) // Package mock is a generated GoMock package. package mock import ( - context "context" - reflect "reflect" + "context" + "reflect" - piecestore "github.com/filecoin-project/boost-gfm/piecestore" - shared "github.com/filecoin-project/boost-gfm/shared" - gomock "github.com/golang/mock/gomock" - cid "github.com/ipfs/go-cid" + "github.com/filecoin-project/boost/markets/piecestore" + "github.com/filecoin-project/go-fil-markets/shared" + "github.com/golang/mock/gomock" + "github.com/ipfs/go-cid" ) // MockPieceStore is a mock of PieceStore interface. 
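For illustration, a sketch of how the regenerated PieceStore mock can be driven from a test with gomock, mirroring the pattern the removed shardselector tests used; it assumes the PieceInfo type in boost/markets/piecestore keeps the same shape as the boost-gfm type it replaces, and the test name is hypothetical:

package mock_test

import (
	"testing"

	"github.com/filecoin-project/boost/markets/piecestore"
	"github.com/filecoin-project/boost/retrievalmarket/mock"
	"github.com/filecoin-project/boost/testutil"
	"github.com/golang/mock/gomock"
)

func TestMockPieceStoreSketch(t *testing.T) {
	ctrl := gomock.NewController(t)
	ps := mock.NewMockPieceStore(ctrl)

	// Program the mock: a lookup for this piece CID returns a PieceInfo.
	pieceCid := testutil.GenerateCid()
	ps.EXPECT().GetPieceInfo(pieceCid).Return(piecestore.PieceInfo{PieceCID: pieceCid}, nil)

	pi, err := ps.GetPieceInfo(pieceCid)
	if err != nil || !pi.PieceCID.Equals(pieceCid) {
		t.Fatal("unexpected mock result")
	}
}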
diff --git a/retrievalmarket/mock/retrievalmarket.go b/retrievalmarket/mock/retrievalmarket.go index fcc04cf05..9e4c9d075 100644 --- a/retrievalmarket/mock/retrievalmarket.go +++ b/retrievalmarket/mock/retrievalmarket.go @@ -9,8 +9,8 @@ import ( io "io" reflect "reflect" - retrievalmarket "github.com/filecoin-project/boost-gfm/retrievalmarket" - shared "github.com/filecoin-project/boost-gfm/shared" + retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket" + shared "github.com/filecoin-project/go-fil-markets/shared" abi "github.com/filecoin-project/go-state-types/abi" gomock "github.com/golang/mock/gomock" ) diff --git a/retrievalmarket/rtvllog/db.go b/retrievalmarket/rtvllog/db.go index 5a89b79d8..5ee646a63 100644 --- a/retrievalmarket/rtvllog/db.go +++ b/retrievalmarket/rtvllog/db.go @@ -8,8 +8,8 @@ import ( "strings" "time" - "github.com/filecoin-project/boost-gfm/retrievalmarket" - datatransfer "github.com/filecoin-project/go-data-transfer" + datatransfer2 "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/ipfs/go-cid" @@ -32,8 +32,8 @@ type RetrievalDealState struct { UpdatedAt time.Time LocalPeerID peer.ID PeerID peer.ID - DealID retrievalmarket.DealID - TransferID datatransfer.TransferID + DealID legacyretrievaltypes.DealID + TransferID datatransfer2.TransferID PayloadCID cid.Cid PieceCID *cid.Cid PaymentInterval uint64 @@ -278,7 +278,7 @@ func (d *RetrievalLogDB) Count(ctx context.Context, isIndexer *bool) (int, error return count, err } -func (d *RetrievalLogDB) Update(ctx context.Context, state retrievalmarket.ProviderDealState) error { +func (d *RetrievalLogDB) Update(ctx context.Context, state legacyretrievaltypes.ProviderDealState) error { fields := map[string]interface{}{ "Status": state.Status.String(), "TotalSent": state.TotalSent, @@ -297,7 +297,7 @@ func (d *RetrievalLogDB) Update(ctx context.Context, state retrievalmarket.Provi return d.update(ctx, fields, where, args...) 
} -func (d *RetrievalLogDB) UpdateDataTransferState(ctx context.Context, event datatransfer.Event, state datatransfer.ChannelState) error { +func (d *RetrievalLogDB) UpdateDataTransferState(ctx context.Context, event datatransfer2.Event, state datatransfer2.ChannelState) error { peerID := state.OtherPeer().String() transferID := state.TransferID() if err := d.insertDTEvent(ctx, peerID, transferID, event); err != nil { @@ -305,7 +305,7 @@ func (d *RetrievalLogDB) UpdateDataTransferState(ctx context.Context, event data } fields := map[string]interface{}{ - "DTStatus": datatransfer.Statuses[state.Status()], + "DTStatus": datatransfer2.Statuses[state.Status()], "DTMessage": state.Message(), "UpdatedAt": time.Now(), } @@ -326,10 +326,10 @@ func (d *RetrievalLogDB) update(ctx context.Context, fields map[string]interface return err } -func (d *RetrievalLogDB) insertDTEvent(ctx context.Context, peerID string, transferID datatransfer.TransferID, event datatransfer.Event) error { +func (d *RetrievalLogDB) insertDTEvent(ctx context.Context, peerID string, transferID datatransfer2.TransferID, event datatransfer2.Event) error { qry := "INSERT INTO RetrievalDataTransferEvents (PeerID, TransferID, CreatedAt, Name, Message) " + "VALUES (?, ?, ?, ?, ?)" - _, err := d.db.ExecContext(ctx, qry, peerID, transferID, event.Timestamp, datatransfer.Events[event.Code], event.Message) + _, err := d.db.ExecContext(ctx, qry, peerID, transferID, event.Timestamp, datatransfer2.Events[event.Code], event.Message) return err } @@ -339,7 +339,7 @@ type DTEvent struct { Message string } -func (d *RetrievalLogDB) ListDTEvents(ctx context.Context, peerID string, transferID datatransfer.TransferID) ([]DTEvent, error) { +func (d *RetrievalLogDB) ListDTEvents(ctx context.Context, peerID string, transferID datatransfer2.TransferID) ([]DTEvent, error) { qry := "SELECT CreatedAt, Name, Message " + "FROM RetrievalDataTransferEvents " + "WHERE PeerID = ? AND TransferID = ? " + @@ -372,10 +372,10 @@ func (d *RetrievalLogDB) ListDTEvents(ctx context.Context, peerID string, transf return dtEvents, nil } -func (d *RetrievalLogDB) InsertMarketsEvent(ctx context.Context, event retrievalmarket.ProviderEvent, state retrievalmarket.ProviderDealState) error { +func (d *RetrievalLogDB) InsertMarketsEvent(ctx context.Context, event legacyretrievaltypes.ProviderEvent, state legacyretrievaltypes.ProviderDealState) error { // Ignore block sent events as we are recording the equivalent event for // data-transfer, and it's a high-frequency event - if event == retrievalmarket.ProviderEventBlockSent { + if event == legacyretrievaltypes.ProviderEventBlockSent { return nil } @@ -385,7 +385,7 @@ func (d *RetrievalLogDB) InsertMarketsEvent(ctx context.Context, event retrieval state.Receiver.String(), state.ID, time.Now(), - retrievalmarket.ProviderEvents[event], + legacyretrievaltypes.ProviderEvents[event], state.Status.String(), state.Message) return err @@ -398,7 +398,7 @@ type MarketEvent struct { Message string } -func (d *RetrievalLogDB) ListMarketEvents(ctx context.Context, peerID string, dealID retrievalmarket.DealID) ([]MarketEvent, error) { +func (d *RetrievalLogDB) ListMarketEvents(ctx context.Context, peerID string, dealID legacyretrievaltypes.DealID) ([]MarketEvent, error) { qry := "SELECT CreatedAt, Name, Status, Message " + "FROM RetrievalMarketEvents " + "WHERE PeerID = ? AND DealID = ? 
" + diff --git a/retrievalmarket/rtvllog/retrieval_log.go b/retrievalmarket/rtvllog/retrieval_log.go index 3711bf577..c1c0941be 100644 --- a/retrievalmarket/rtvllog/retrieval_log.go +++ b/retrievalmarket/rtvllog/retrieval_log.go @@ -2,14 +2,12 @@ package rtvllog import ( "context" - "errors" "sync" "time" - "github.com/filecoin-project/boost-gfm/retrievalmarket" - "github.com/filecoin-project/boost/node/modules/dtypes" + datatransfer2 "github.com/filecoin-project/boost/datatransfer" "github.com/filecoin-project/boost/retrievalmarket/server" - datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" logging "github.com/ipfs/go-log/v2" ) @@ -18,7 +16,6 @@ var log = logging.Logger("rtrvlog") type RetrievalLog struct { db *RetrievalLogDB duration time.Duration - dataTransfer dtypes.ProviderDataTransfer gsur *server.GraphsyncUnpaidRetrieval stalledTimeout time.Duration ctx context.Context @@ -29,7 +26,7 @@ type RetrievalLog struct { lastUpdate map[string]time.Time } -func NewRetrievalLog(db *RetrievalLogDB, duration time.Duration, dt dtypes.ProviderDataTransfer, stalledTimeout time.Duration, gsur *server.GraphsyncUnpaidRetrieval) *RetrievalLog { +func NewRetrievalLog(db *RetrievalLogDB, duration time.Duration, stalledTimeout time.Duration, gsur *server.GraphsyncUnpaidRetrieval) *RetrievalLog { if duration < stalledTimeout { log.Warnf("the RetrievalLogDuration (%s) should exceed the StalledRetrievalTimeout (%s)", duration.String(), stalledTimeout.String()) } @@ -37,7 +34,6 @@ func NewRetrievalLog(db *RetrievalLogDB, duration time.Duration, dt dtypes.Provi return &RetrievalLog{ db: db, duration: duration, - dataTransfer: dt, gsur: gsur, stalledTimeout: stalledTimeout, dbUpdates: make(chan func(), 256), @@ -49,12 +45,11 @@ func (r *RetrievalLog) Start(ctx context.Context) { r.ctx = ctx go r.gcUpdateMap(ctx) go r.gcDatabase(ctx) - go r.gcRetrievals(ctx) go r.processDBUpdates(ctx) } // Called when there is a retrieval ask query -func (r *RetrievalLog) OnQueryEvent(evt retrievalmarket.ProviderQueryEvent) { +func (r *RetrievalLog) OnQueryEvent(evt legacyretrievaltypes.ProviderQueryEvent) { log.Debugw("query-event", "status", evt.Response.Status, "msg", evt.Response.Message, @@ -71,10 +66,10 @@ func (r *RetrievalLog) OnQueryEvent(evt retrievalmarket.ProviderQueryEvent) { st.Message = evt.Error.Error() } } else { - if evt.Response.Status == retrievalmarket.QueryResponseUnavailable { + if evt.Response.Status == legacyretrievaltypes.QueryResponseUnavailable { st.Status = "unavailable" } - if evt.Response.Status == retrievalmarket.QueryResponseError { + if evt.Response.Status == legacyretrievaltypes.QueryResponseError { st.Status = "errored" } } @@ -92,10 +87,10 @@ func (r *RetrievalLog) OnQueryEvent(evt retrievalmarket.ProviderQueryEvent) { // This occurs when the client makes a graphsync retrieval request, and the // Storage Provider validates the request (eg checking its parameters for // validity, checking for acceptance against the retrieval filter, etc) -func (r *RetrievalLog) OnValidationEvent(evt retrievalmarket.ProviderValidationEvent) { +func (r *RetrievalLog) OnValidationEvent(evt legacyretrievaltypes.ProviderValidationEvent) { // Ignore ErrPause and ErrResume because they are signalling errors, not // actual errors because of incorrect behaviour. 
- if evt.Error == nil || evt.Error == datatransfer.ErrPause || evt.Error == datatransfer.ErrResume { + if evt.Error == nil || evt.Error == datatransfer2.ErrPause || evt.Error == datatransfer2.ErrResume { return } @@ -103,7 +98,7 @@ func (r *RetrievalLog) OnValidationEvent(evt retrievalmarket.ProviderValidationE st := &RetrievalDealState{ PeerID: evt.Receiver, PayloadCID: evt.BaseCid, - Status: retrievalmarket.DealStatusErrored.String(), + Status: legacyretrievaltypes.DealStatusErrored.String(), Message: evt.Error.Error(), } if evt.Response != nil { @@ -132,10 +127,10 @@ func (r *RetrievalLog) OnValidationEvent(evt retrievalmarket.ProviderValidationE } // Called when there is an event from the data-transfer subsystem -func (r *RetrievalLog) OnDataTransferEvent(event datatransfer.Event, state datatransfer.ChannelState) { +func (r *RetrievalLog) OnDataTransferEvent(event datatransfer2.Event, state datatransfer2.ChannelState) { log.Debugw("dt-event", - "evt", datatransfer.Events[event.Code], - "status", datatransfer.Statuses[state.Status()], + "evt", datatransfer2.Events[event.Code], + "status", datatransfer2.Statuses[state.Status()], "message", state.Message(), "is-pull", state.IsPull()) @@ -145,10 +140,10 @@ func (r *RetrievalLog) OnDataTransferEvent(event datatransfer.Event, state datat } switch event.Code { - case datatransfer.DataQueued, datatransfer.DataQueuedProgress, datatransfer.DataSentProgress, - datatransfer.DataReceived, datatransfer.DataReceivedProgress: + case datatransfer2.DataQueued, datatransfer2.DataQueuedProgress, datatransfer2.DataSentProgress, + datatransfer2.DataReceived, datatransfer2.DataReceivedProgress: return - case datatransfer.DataSent: + case datatransfer2.DataSent: // To prevent too frequent updates, only allow data sent updates if it's // been more than half a second since the last one if !r.allowUpdate(state.ChannelID().String()) { @@ -165,29 +160,29 @@ func (r *RetrievalLog) OnDataTransferEvent(event datatransfer.Event, state datat } // Called when there is a markets event -func (r *RetrievalLog) OnRetrievalEvent(event retrievalmarket.ProviderEvent, state retrievalmarket.ProviderDealState) { +func (r *RetrievalLog) OnRetrievalEvent(event legacyretrievaltypes.ProviderEvent, state legacyretrievaltypes.ProviderDealState) { // To prevent too frequent updates, only allow block sent updates if it's // been more than half a second since the last one - if event == retrievalmarket.ProviderEventBlockSent && !r.allowUpdate(state.ChannelID.String()) { + if event == legacyretrievaltypes.ProviderEventBlockSent && !r.allowUpdate(state.ChannelID.String()) { return } - var transferID datatransfer.TransferID + var transferID datatransfer2.TransferID if state.ChannelID != nil { log.Debugw("event", - "evt", retrievalmarket.ProviderEvents[event], + "evt", legacyretrievaltypes.ProviderEvents[event], "status", state.Status, "initiator", state.ChannelID.Initiator, "responder", state.ChannelID.Responder, "transfer id", state.ChannelID.ID) transferID = state.ChannelID.ID } else { - log.Debugw("event", "evt", retrievalmarket.ProviderEvents[event], "status", state.Status) + log.Debugw("event", "evt", legacyretrievaltypes.ProviderEvents[event], "status", state.Status) } r.dbUpdate(func() { var err error - if event == retrievalmarket.ProviderEventOpen { + if event == legacyretrievaltypes.ProviderEventOpen { err = r.db.Insert(r.ctx, &RetrievalDealState{ PeerID: state.Receiver, DealID: state.ID, @@ -278,58 +273,6 @@ func (r *RetrievalLog) gcDatabase(ctx context.Context) { } } -// 
Periodically cancels stalled retrievals older than 30mins -func (r *RetrievalLog) gcRetrievals(ctx context.Context) { - ticker := time.NewTicker(5 * time.Minute) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - return - case now := <-ticker.C: - // Get retrievals last updated - rows, err := r.db.ListLastUpdatedAndOpen(ctx, now.Add(-r.stalledTimeout)) - - if err != nil { - log.Errorw("error fetching open, stalled retrievals", "err", err) - continue - } - - var wg sync.WaitGroup - for _, row := range rows { - if row.TransferID <= 0 { - continue - } - wg.Add(1) - go func(s RetrievalDealState) { - // Don't wait for more than 5 seconds for the cancel - // message to be sent when cancelling an unpaid retrieval - unpaidRtrvCtx, cancel := context.WithTimeout(ctx, time.Second*5) - defer cancel() - defer wg.Done() - - // Try to cancel an unpaid retrieval with the given transfer id first - err := r.gsur.CancelTransfer(unpaidRtrvCtx, s.TransferID, &s.PeerID) - if err != nil && errors.Is(err, server.ErrRetrievalNotFound) { - // Couldn't find an unpaid retrieval with that id, try - // to cancel a legacy, paid retrieval - chid := datatransfer.ChannelID{Initiator: s.PeerID, Responder: s.LocalPeerID, ID: s.TransferID} - err = r.dataTransfer.CloseDataTransferChannel(ctx, chid) - } - - if err != nil { - log.Debugw("error canceling retrieval", "dealID", s.DealID, "err", err) - } else { - log.Infof("Canceled retrieval %s, older than %s", s.DealID, r.stalledTimeout) - } - }(row) - } - wg.Wait() - } - } -} - // Perform database updates in a separate thread so that they don't block the // event publisher loop func (r *RetrievalLog) dbUpdate(update func()) { diff --git a/retrievalmarket/server/channelstate.go b/retrievalmarket/server/channelstate.go index 1f1b48ed8..9f596eae2 100644 --- a/retrievalmarket/server/channelstate.go +++ b/retrievalmarket/server/channelstate.go @@ -3,9 +3,9 @@ package server import ( "bytes" - "github.com/filecoin-project/boost-gfm/retrievalmarket" graphsync "github.com/filecoin-project/boost-graphsync" - datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" "github.com/ipfs/go-cid" "github.com/ipld/go-ipld-prime" "github.com/ipld/go-ipld-prime/codec/dagcbor" @@ -22,12 +22,12 @@ const RetrievalTypeLegs RetrievalType = "Legs" type retrievalState struct { retType RetrievalType cs *channelState - mkts *retrievalmarket.ProviderDealState + mkts *legacyretrievaltypes.ProviderDealState gsReq graphsync.RequestID } -func (r retrievalState) ChannelState() channelState { return *r.cs } -func (r retrievalState) ProviderDealState() retrievalmarket.ProviderDealState { return *r.mkts } +func (r retrievalState) ChannelState() channelState { return *r.cs } +func (r retrievalState) ProviderDealState() legacyretrievaltypes.ProviderDealState { return *r.mkts } // channelState is immutable channel data plus mutable state type channelState struct { @@ -102,8 +102,8 @@ func (c channelState) Selector() ipld.Node { } // Voucher returns the voucher for this data transfer -func (c channelState) Voucher() datatransfer.Voucher { - return nil +func (c channelState) Voucher() datatransfer.TypedVoucher { + return datatransfer.TypedVoucher{} } // ReceivedCidsTotal returns the number of (non-unique) cids received so far @@ -149,22 +149,6 @@ func (c channelState) Message() string { return c.message } -func (c channelState) Vouchers() []datatransfer.Voucher { - return 
nil -} - -func (c channelState) LastVoucher() datatransfer.Voucher { - return nil -} - -func (c channelState) LastVoucherResult() datatransfer.VoucherResult { - return nil -} - -func (c channelState) VoucherResults() []datatransfer.VoucherResult { - return nil -} - func (c channelState) SelfPeer() peer.ID { return c.selfPeer } diff --git a/retrievalmarket/server/events.go b/retrievalmarket/server/events.go index a8d1efb23..fea7db765 100644 --- a/retrievalmarket/server/events.go +++ b/retrievalmarket/server/events.go @@ -2,23 +2,24 @@ package server import ( "errors" - "github.com/filecoin-project/boost-gfm/retrievalmarket" - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/hannahhoward/go-pubsub" "time" + + datatransfer2 "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" + "github.com/hannahhoward/go-pubsub" ) -func (g *GraphsyncUnpaidRetrieval) SubscribeToDataTransferEvents(subscriber datatransfer.Subscriber) datatransfer.Unsubscribe { - return datatransfer.Unsubscribe(g.pubSubDT.Subscribe(subscriber)) +func (g *GraphsyncUnpaidRetrieval) SubscribeToDataTransferEvents(subscriber datatransfer2.Subscriber) datatransfer2.Unsubscribe { + return datatransfer2.Unsubscribe(g.pubSubDT.Subscribe(subscriber)) } type dtEvent struct { - evt datatransfer.Event - state datatransfer.ChannelState + evt datatransfer2.Event + state datatransfer2.ChannelState } -func (g *GraphsyncUnpaidRetrieval) publishDTEvent(evtCode datatransfer.EventCode, msg string, chst datatransfer.ChannelState) { - evt := datatransfer.Event{ +func (g *GraphsyncUnpaidRetrieval) publishDTEvent(evtCode datatransfer2.EventCode, msg string, chst datatransfer2.ChannelState) { + evt := datatransfer2.Event{ Code: evtCode, Message: msg, Timestamp: time.Now(), @@ -34,7 +35,7 @@ func eventDispatcherDT(evt pubsub.Event, subscriberFn pubsub.SubscriberFn) error if !ok { return errors.New("wrong type of event") } - cb, ok := subscriberFn.(datatransfer.Subscriber) + cb, ok := subscriberFn.(datatransfer2.Subscriber) if !ok { return errors.New("wrong type of subscriber function") } @@ -42,16 +43,16 @@ func eventDispatcherDT(evt pubsub.Event, subscriberFn pubsub.SubscriberFn) error return nil } -func (g *GraphsyncUnpaidRetrieval) SubscribeToMarketsEvents(subscriber retrievalmarket.ProviderSubscriber) retrievalmarket.Unsubscribe { - return retrievalmarket.Unsubscribe(g.pubSubMkts.Subscribe(subscriber)) +func (g *GraphsyncUnpaidRetrieval) SubscribeToMarketsEvents(subscriber ProviderSubscriber) legacyretrievaltypes.Unsubscribe { + return legacyretrievaltypes.Unsubscribe(g.pubSubMkts.Subscribe(subscriber)) } type mktsEvent struct { - evt retrievalmarket.ProviderEvent - state retrievalmarket.ProviderDealState + evt legacyretrievaltypes.ProviderEvent + state legacyretrievaltypes.ProviderDealState } -func (g *GraphsyncUnpaidRetrieval) publishMktsEvent(evt retrievalmarket.ProviderEvent, state retrievalmarket.ProviderDealState) { +func (g *GraphsyncUnpaidRetrieval) publishMktsEvent(evt legacyretrievaltypes.ProviderEvent, state legacyretrievaltypes.ProviderDealState) { err := g.pubSubMkts.Publish(mktsEvent{evt: evt, state: state}) if err != nil { log.Warnf("err publishing markets event: %s", err.Error()) @@ -63,10 +64,19 @@ func eventDispatcherMkts(evt pubsub.Event, subscriberFn pubsub.SubscriberFn) err if !ok { return errors.New("wrong type of event") } - cb, ok := subscriberFn.(retrievalmarket.ProviderSubscriber) + cb, ok := subscriberFn.(ProviderSubscriber) 
if !ok { return errors.New("wrong type of event") } cb(ie.evt, ie.state) return nil } + +// ProviderSubscriber is a callback that is registered to listen for retrieval events on a provider +type ProviderSubscriber func(event legacyretrievaltypes.ProviderEvent, state legacyretrievaltypes.ProviderDealState) + +// ProviderQueryEventSubscriber is a callback that is registered to listen for query message events +type ProviderQueryEventSubscriber func(evt legacyretrievaltypes.ProviderQueryEvent) + +// ProviderValidationSubscriber is a callback that is registered to listen for validation events +type ProviderValidationSubscriber func(evt legacyretrievaltypes.ProviderValidationEvent) diff --git a/retrievalmarket/server/gsunpaidretrieval.go b/retrievalmarket/server/gsunpaidretrieval.go index e9b76b388..f20625ceb 100644 --- a/retrievalmarket/server/gsunpaidretrieval.go +++ b/retrievalmarket/server/gsunpaidretrieval.go @@ -6,19 +6,17 @@ import ( "fmt" "sync" - "github.com/filecoin-project/boost-gfm/retrievalmarket" - retrievalimpl "github.com/filecoin-project/boost-gfm/retrievalmarket/impl" - "github.com/filecoin-project/boost-gfm/retrievalmarket/migrations" graphsync "github.com/filecoin-project/boost-graphsync" + datatransfer2 "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/datatransfer/encoding" + "github.com/filecoin-project/boost/datatransfer/message" + "github.com/filecoin-project/boost/datatransfer/network" + "github.com/filecoin-project/boost/datatransfer/registry" + "github.com/filecoin-project/boost/datatransfer/transport/graphsync/extension" "github.com/filecoin-project/boost/metrics" + "github.com/filecoin-project/boost/node/modules" "github.com/filecoin-project/boost/piecedirectory" - "github.com/filecoin-project/boost/retrievalmarket/types" - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-data-transfer/encoding" - "github.com/filecoin-project/go-data-transfer/message" - "github.com/filecoin-project/go-data-transfer/network" - "github.com/filecoin-project/go-data-transfer/registry" - "github.com/filecoin-project/go-data-transfer/transport/graphsync/extension" + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" "github.com/filecoin-project/go-state-types/abi" "github.com/hannahhoward/go-pubsub" logging "github.com/ipfs/go-log/v2" @@ -39,7 +37,7 @@ var incomingReqExtensions = []graphsync.ExtensionName{ // Uniquely identify a request (requesting peer + data transfer id) type reqId struct { p peer.ID - id datatransfer.TransferID + id datatransfer2.TransferID } type LinkSystemProvider interface { @@ -76,23 +74,15 @@ var defaultExtensions = []graphsync.ExtensionName{ } type ValidationDeps struct { - DealDecider retrievalimpl.DealDecider + DealDecider DealDecider PieceDirectory *piecedirectory.PieceDirectory SectorAccessor SectorAccessor - AskStore AskGetter + AskStore *modules.RetrievalAskGetter } -func NewGraphsyncUnpaidRetrieval(peerID peer.ID, gs graphsync.GraphExchange, dtnet network.DataTransferNetwork, vdeps ValidationDeps, ls LinkSystemProvider) (*GraphsyncUnpaidRetrieval, error) { +func NewGraphsyncUnpaidRetrieval(peerID peer.ID, gs graphsync.GraphExchange, dtnet network.DataTransferNetwork, vdeps ValidationDeps) (*GraphsyncUnpaidRetrieval, error) { typeRegistry := registry.NewRegistry() - err := typeRegistry.Register(&retrievalmarket.DealProposal{}, nil) - if err != nil { - return nil, err - } - err = typeRegistry.Register(&migrations.DealProposal0{}, nil) - if err != nil 
{ - return nil, err - } - err = typeRegistry.Register(&types.LegsVoucherDTv1{}, nil) + err := typeRegistry.Register(&legacyretrievaltypes.DealProposal{}, nil) if err != nil { return nil, err } @@ -106,7 +96,6 @@ func NewGraphsyncUnpaidRetrieval(peerID peer.ID, gs graphsync.GraphExchange, dtn pubSubMkts: pubsub.New(eventDispatcherMkts), validator: newRequestValidator(vdeps), activeRetrievals: make(map[reqId]*retrievalState), - linkSystem: ls, }, nil } @@ -114,21 +103,11 @@ func (g *GraphsyncUnpaidRetrieval) Start(ctx context.Context) error { g.ctx = ctx g.validator.ctx = ctx - if g.linkSystem != nil && g.linkSystem.LinkSys() != nil { - // The index provider uses graphsync to fetch advertisements. - // We need to tell graphsync to use a different IPLD Link System to provide - // the advertisements (instead of using the blockstore). - err := g.RegisterPersistenceOption("indexstore", *g.linkSystem.LinkSys()) - if err != nil { - return fmt.Errorf("setting persistence option for index advertisement retrieval: %w", err) - } - } - return nil } // Called when a new request is received -func (g *GraphsyncUnpaidRetrieval) trackTransfer(p peer.ID, id datatransfer.TransferID, state *retrievalState) { +func (g *GraphsyncUnpaidRetrieval) trackTransfer(p peer.ID, id datatransfer2.TransferID, state *retrievalState) { // Record the transfer as an active retrieval so we can distinguish between // retrievals intercepted by this class, and those passed through to the // paid retrieval implementation. @@ -144,7 +123,7 @@ func (g *GraphsyncUnpaidRetrieval) trackTransfer(p peer.ID, id datatransfer.Tran // Called when a request completes (either successfully or in failure) // TODO: Make sure that untrackTransfer is always called eventually // (may need to add a timeout) -func (g *GraphsyncUnpaidRetrieval) untrackTransfer(p peer.ID, id datatransfer.TransferID) { +func (g *GraphsyncUnpaidRetrieval) untrackTransfer(p peer.ID, id datatransfer2.TransferID) { g.activeRetrievalsLk.Lock() delete(g.activeRetrievals, reqId{p: p, id: id}) g.activeRetrievalsLk.Unlock() @@ -152,7 +131,7 @@ func (g *GraphsyncUnpaidRetrieval) untrackTransfer(p peer.ID, id datatransfer.Tr g.dtnet.Unprotect(p, fmt.Sprintf("%d", id)) } -func (g *GraphsyncUnpaidRetrieval) CancelTransfer(ctx context.Context, id datatransfer.TransferID, p *peer.ID) error { +func (g *GraphsyncUnpaidRetrieval) CancelTransfer(ctx context.Context, id datatransfer2.TransferID, p *peer.ID) error { g.activeRetrievalsLk.Lock() var state *retrievalState @@ -249,7 +228,7 @@ func (g *GraphsyncUnpaidRetrieval) interceptRetrieval(p peer.ID, request graphsy return false, nil } - dtRequest := msg.(datatransfer.Request) + dtRequest := msg.(datatransfer2.Request) if !dtRequest.IsNew() && !dtRequest.IsRestart() { // The request is not for a new retrieval (it's a cancel etc). // If this message is for an existing unpaid retrieval it will already @@ -260,45 +239,17 @@ func (g *GraphsyncUnpaidRetrieval) interceptRetrieval(p peer.ID, request graphsy } // The request is for a new transfer / restart transfer, so check if it's - // for an unpaid retrieval - voucher, decodeErr := g.decodeVoucher(dtRequest, g.decoder) + // for an unpaid retrieval. We are explicitly checking for voucher type to be + // legacyretrievaltypes.DealProposal{}. Rest are all rejected at this stage. + _, decodeErr := g.decodeVoucher(dtRequest, g.decoder) if decodeErr != nil { - // If we don't recognize the voucher, don't intercept the retrieval. - // Instead it will be passed through to the legacy code for processing. 
- if !errors.Is(decodeErr, unknownVoucherErr) { - return false, fmt.Errorf("decoding new request voucher: %w", decodeErr) - } + return false, fmt.Errorf("decoding new request voucher: %w", decodeErr) } - switch v := voucher.(type) { - case *types.LegsVoucherDTv1: - // This is a go-legs voucher (used by the network indexer to retrieve - // deal announcements) - - // Treat it the same way as a retrieval deal proposal with no payment - params, err := retrievalmarket.NewParamsV1(abi.NewTokenAmount(0), 0, 0, request.Selector(), nil, abi.NewTokenAmount(0)) - if err != nil { - return false, err - } - proposal := retrievalmarket.DealProposal{ - PayloadCID: request.Root(), - Params: params, - } - return g.handleRetrievalDeal(p, msg, proposal, request, RetrievalTypeLegs) - case *retrievalmarket.DealProposal: - // This is a retrieval deal - proposal := *v - return g.handleRetrievalDeal(p, msg, proposal, request, RetrievalTypeDeal) - case *migrations.DealProposal0: - // This is a retrieval deal with an older format - proposal := migrations.MigrateDealProposal0To1(*v) - return g.handleRetrievalDeal(p, msg, proposal, request, RetrievalTypeDeal) - } - - return false, nil + return g.handleRetrievalDeal(p, msg, legacyretrievaltypes.DealProposal{}, request, RetrievalTypeDeal) } -func (g *GraphsyncUnpaidRetrieval) handleRetrievalDeal(peerID peer.ID, msg datatransfer.Message, proposal retrievalmarket.DealProposal, request graphsync.RequestData, retType RetrievalType) (bool, error) { +func (g *GraphsyncUnpaidRetrieval) handleRetrievalDeal(peerID peer.ID, msg datatransfer2.Message, proposal legacyretrievaltypes.DealProposal, request graphsync.RequestData, retType RetrievalType) (bool, error) { // If it's a paid retrieval, do not intercept it if !proposal.UnsealPrice.IsZero() || !proposal.PricePerByte.IsZero() { return false, nil @@ -316,14 +267,14 @@ func (g *GraphsyncUnpaidRetrieval) handleRetrievalDeal(peerID peer.ID, msg datat selector: &cbg.Deferred{Raw: selBytes}, sender: g.peerID, recipient: peerID, - status: datatransfer.Requested, + status: datatransfer2.Requested, isPull: true, } - mktsState := &retrievalmarket.ProviderDealState{ + mktsState := &legacyretrievaltypes.ProviderDealState{ DealProposal: proposal, - ChannelID: &datatransfer.ChannelID{ID: msg.TransferID(), Initiator: peerID, Responder: g.peerID}, - Status: retrievalmarket.DealStatusNew, + ChannelID: &datatransfer2.ChannelID{ID: msg.TransferID(), Initiator: peerID, Responder: g.peerID}, + Status: legacyretrievaltypes.DealStatusNew, Receiver: peerID, FundsReceived: abi.NewTokenAmount(0), } @@ -339,7 +290,7 @@ func (g *GraphsyncUnpaidRetrieval) handleRetrievalDeal(peerID peer.ID, msg datat g.trackTransfer(peerID, msg.TransferID(), state) // Fire transfer queued event - g.publishDTEvent(datatransfer.TransferRequestQueued, "", cs) + g.publishDTEvent(datatransfer2.TransferRequestQueued, "", cs) // This is an unpaid retrieval, so this class is responsible for // handling it @@ -371,8 +322,8 @@ func (g *GraphsyncUnpaidRetrieval) RegisterIncomingRequestHook(hook graphsync.On if msg.IsRestart() { dtOpenMsg += " (restart)" } - g.publishDTEvent(datatransfer.Open, dtOpenMsg, state.cs) - g.publishMktsEvent(retrievalmarket.ProviderEventOpen, *state.mkts) + g.publishDTEvent(datatransfer2.Open, dtOpenMsg, state.cs) + g.publishMktsEvent(legacyretrievaltypes.ProviderEventOpen, *state.mkts) err := func() error { voucher, decodeErr := g.decodeVoucher(msg, g.decoder) @@ -381,22 +332,11 @@ func (g *GraphsyncUnpaidRetrieval) RegisterIncomingRequestHook(hook 
graphsync.On } // Validate the request - var res datatransfer.VoucherResult - var validateErr error - - if _, ok := voucher.(*types.LegsVoucherDTv1); ok { - // It's a go-legs voucher, so we need to tell Graphsync to - // use a different IPLD Link System to serve the data (instead - // of using the regular blockstore) - res = &types.LegsVoucherResultDtv1{} - validateErr = nil - hookActions.UsePersistenceOption("indexstore") - } else { - res, validateErr = g.validator.validatePullRequest(msg.IsRestart(), p, voucher, request.Root(), request.Selector()) - } + res, validateErr := g.validator.validatePullRequest(msg.IsRestart(), p, voucher, request.Root(), request.Selector()) + isAccepted := validateErr == nil const isPaused = false // There are no payments required, so never pause - resultType := datatransfer.EmptyTypeIdentifier + resultType := datatransfer2.EmptyTypeIdentifier if res != nil { resultType = res.Type() } @@ -437,12 +377,12 @@ func (g *GraphsyncUnpaidRetrieval) RegisterIncomingRequestHook(hook graphsync.On hookActions.ValidateRequest() // Fire events - state.cs.status = datatransfer.Ongoing - g.publishDTEvent(datatransfer.Accept, "", state.cs) - state.mkts.Status = retrievalmarket.DealStatusUnsealing - g.publishMktsEvent(retrievalmarket.ProviderEventDealAccepted, *state.mkts) - state.mkts.Status = retrievalmarket.DealStatusUnsealed - g.publishMktsEvent(retrievalmarket.ProviderEventUnsealComplete, *state.mkts) + state.cs.status = datatransfer2.Ongoing + g.publishDTEvent(datatransfer2.Accept, "", state.cs) + state.mkts.Status = legacyretrievaltypes.DealStatusUnsealing + g.publishMktsEvent(legacyretrievaltypes.ProviderEventDealAccepted, *state.mkts) + state.mkts.Status = legacyretrievaltypes.DealStatusUnsealed + g.publishMktsEvent(legacyretrievaltypes.ProviderEventUnsealComplete, *state.mkts) stats.Record(g.ctx, metrics.GraphsyncRequestStartedUnpaidSuccessCount.M(1)) }) @@ -498,28 +438,18 @@ func (g *GraphsyncUnpaidRetrieval) RegisterCompletedResponseListener(listener gr } // Fire markets blocks completed event - state.mkts.Status = retrievalmarket.DealStatusBlocksComplete - g.publishMktsEvent(retrievalmarket.ProviderEventBlocksCompleted, *state.mkts) + state.mkts.Status = legacyretrievaltypes.DealStatusBlocksComplete + g.publishMktsEvent(legacyretrievaltypes.ProviderEventBlocksCompleted, *state.mkts) // Include a markets protocol Completed message in the response - var voucherResult encoding.Encodable - var voucherType datatransfer.TypeIdentifier - if state.retType == RetrievalTypeDeal { - dealResponse := &retrievalmarket.DealResponse{ - ID: state.mkts.DealProposal.ID, - Status: retrievalmarket.DealStatusCompleted, - } - voucherResult = dealResponse - voucherType = dealResponse.Type() - } else { - legsResponse := &types.LegsVoucherResultDtv1{} - voucherResult = legsResponse - voucherType = legsResponse.Type() + dealResponse := &legacyretrievaltypes.DealResponse{ + ID: state.mkts.DealProposal.ID, + Status: legacyretrievaltypes.DealStatusCompleted, } const isAccepted = true const isPaused = false - respMsg, err := message.CompleteResponse(msg.TransferID(), isAccepted, isPaused, voucherType, voucherResult) + respMsg, err := message.CompleteResponse(msg.TransferID(), isAccepted, isPaused, dealResponse.Type(), dealResponse) if err != nil { g.failTransfer(state, fmt.Errorf("getting complete response: %w", err)) return @@ -533,11 +463,11 @@ func (g *GraphsyncUnpaidRetrieval) RegisterCompletedResponseListener(listener gr return } - state.cs.status = datatransfer.Completed - 
g.publishDTEvent(datatransfer.Complete, "", state.cs) + state.cs.status = datatransfer2.Completed + g.publishDTEvent(datatransfer2.Complete, "", state.cs) // Fire markets blocks completed event - state.mkts.Status = retrievalmarket.DealStatusCompleted - g.publishMktsEvent(retrievalmarket.ProviderEventComplete, *state.mkts) + state.mkts.Status = legacyretrievaltypes.DealStatusCompleted + g.publishMktsEvent(legacyretrievaltypes.ProviderEventComplete, *state.mkts) stats.Record(g.ctx, metrics.GraphsyncRequestCompletedUnpaidSuccessCount.M(1)) log.Infow("successfully sent completion message to requestor", "peer", p) @@ -556,10 +486,10 @@ func (g *GraphsyncUnpaidRetrieval) RegisterRequestorCancelledListener(listener g return } - state.cs.status = datatransfer.Cancelled - g.publishDTEvent(datatransfer.Cancel, "client cancelled", state.cs) - state.mkts.Status = retrievalmarket.DealStatusCancelled - g.publishMktsEvent(retrievalmarket.ProviderEventCancelComplete, *state.mkts) + state.cs.status = datatransfer2.Cancelled + g.publishDTEvent(datatransfer2.Cancel, "client cancelled", state.cs) + state.mkts.Status = legacyretrievaltypes.DealStatusCancelled + g.publishMktsEvent(legacyretrievaltypes.ProviderEventCancelComplete, *state.mkts) g.untrackTransfer(p, state.cs.transferID) @@ -593,7 +523,7 @@ func (g *GraphsyncUnpaidRetrieval) RegisterBlockSentListener(listener graphsync. // Fire block sent event state.cs.sent += block.BlockSizeOnWire() - g.publishDTEvent(datatransfer.DataSent, "", state.cs) + g.publishDTEvent(datatransfer2.DataSent, "", state.cs) state.mkts.TotalSent += block.BlockSizeOnWire() stats.Record(g.ctx, metrics.GraphsyncRequestBlockSentCount.M(1)) @@ -627,11 +557,11 @@ func (g *GraphsyncUnpaidRetrieval) RegisterNetworkErrorListener(listener graphsy } func (g *GraphsyncUnpaidRetrieval) failTransfer(state *retrievalState, err error) { - state.cs.status = datatransfer.Failed + state.cs.status = datatransfer2.Failed state.cs.message = err.Error() - g.publishDTEvent(datatransfer.Error, err.Error(), state.cs) - state.mkts.Status = retrievalmarket.DealStatusErrored - g.publishMktsEvent(retrievalmarket.ProviderEventDataTransferError, *state.mkts) + g.publishDTEvent(datatransfer2.Error, err.Error(), state.cs) + state.mkts.Status = legacyretrievaltypes.DealStatusErrored + g.publishMktsEvent(legacyretrievaltypes.ProviderEventDataTransferError, *state.mkts) g.untrackTransfer(state.cs.recipient, state.cs.transferID) log.Infow("transfer failed", "transfer id", state.cs.transferID, "peer", state.cs.recipient, "err", err) @@ -639,7 +569,7 @@ func (g *GraphsyncUnpaidRetrieval) failTransfer(state *retrievalState, err error var unknownVoucherErr = errors.New("unknown voucher type") -func (g *GraphsyncUnpaidRetrieval) decodeVoucher(request datatransfer.Request, registry *registry.Registry) (datatransfer.Voucher, error) { +func (g *GraphsyncUnpaidRetrieval) decodeVoucher(request datatransfer2.Request, registry *registry.Registry) (datatransfer2.Voucher, error) { vtypStr := request.VoucherType() decoder, has := registry.Decoder(vtypStr) if !has { @@ -649,10 +579,10 @@ func (g *GraphsyncUnpaidRetrieval) decodeVoucher(request datatransfer.Request, r if err != nil { return nil, err } - return encodable.(datatransfer.Registerable), nil + return encodable.(datatransfer2.Registerable), nil } -func (g *GraphsyncUnpaidRetrieval) isRequestForActiveUnpaidRetrieval(p peer.ID, request graphsync.RequestData) (datatransfer.Request, *retrievalState, bool) { +func (g *GraphsyncUnpaidRetrieval) 
isRequestForActiveUnpaidRetrieval(p peer.ID, request graphsync.RequestData) (datatransfer2.Request, *retrievalState, bool) { // Extract the data transfer message from the Graphsync request msg, err := extension.GetTransferData(request, defaultExtensions) if err != nil { @@ -669,7 +599,7 @@ func (g *GraphsyncUnpaidRetrieval) isRequestForActiveUnpaidRetrieval(p peer.ID, return nil, nil, false } - dtRequest := msg.(datatransfer.Request) + dtRequest := msg.(datatransfer2.Request) state, ok := g.isActiveUnpaidRetrieval(reqId{p: p, id: msg.TransferID()}) return dtRequest, state, ok } @@ -682,6 +612,6 @@ func (g *GraphsyncUnpaidRetrieval) isActiveUnpaidRetrieval(id reqId) (*retrieval return state, ok } -func (g *GraphsyncUnpaidRetrieval) SubscribeToValidationEvents(sub retrievalmarket.ProviderValidationSubscriber) retrievalmarket.Unsubscribe { +func (g *GraphsyncUnpaidRetrieval) SubscribeToValidationEvents(sub ProviderValidationSubscriber) legacyretrievaltypes.Unsubscribe { return g.validator.Subscribe(sub) } diff --git a/retrievalmarket/server/gsunpaidretrieval_test.go b/retrievalmarket/server/gsunpaidretrieval_test.go index fccd1df88..ae330ebe6 100644 --- a/retrievalmarket/server/gsunpaidretrieval_test.go +++ b/retrievalmarket/server/gsunpaidretrieval_test.go @@ -1,42 +1,12 @@ package server import ( - "context" "errors" - "fmt" - "io" - "os" "testing" - "time" - "github.com/filecoin-project/boost-gfm/retrievalmarket" - retrievalimpl "github.com/filecoin-project/boost-gfm/retrievalmarket/impl" - "github.com/filecoin-project/boost-gfm/retrievalmarket/impl/askstore" - "github.com/filecoin-project/boost-gfm/retrievalmarket/impl/testnodes" - rmnet "github.com/filecoin-project/boost-gfm/retrievalmarket/network" - tut "github.com/filecoin-project/boost-gfm/shared_testutil" - graphsyncimpl "github.com/filecoin-project/boost-graphsync/impl" - "github.com/filecoin-project/boost-graphsync/network" - "github.com/filecoin-project/boost-graphsync/storeutil" - "github.com/filecoin-project/boost/piecedirectory" - bdclientutil "github.com/filecoin-project/boostd-data/clientutil" - "github.com/filecoin-project/boostd-data/model" - "github.com/filecoin-project/go-address" - datatransfer "github.com/filecoin-project/go-data-transfer" - dtimpl "github.com/filecoin-project/go-data-transfer/impl" - "github.com/filecoin-project/go-data-transfer/testutil" - dtgstransport "github.com/filecoin-project/go-data-transfer/transport/graphsync" - "github.com/filecoin-project/go-state-types/abi" - "github.com/google/uuid" + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" logging "github.com/ipfs/go-log/v2" - "github.com/ipld/go-car/v2" - basicnode "github.com/ipld/go-ipld-prime/node/basic" - "github.com/ipld/go-ipld-prime/traversal/selector" - "github.com/ipld/go-ipld-prime/traversal/selector/builder" - "github.com/stretchr/testify/require" ) var tlog = logging.Logger("testgs") @@ -44,8 +14,8 @@ var tlog = logging.Logger("testgs") type testCase struct { name string reqPayloadCid cid.Cid - watch func(client retrievalmarket.RetrievalClient, gsupr *GraphsyncUnpaidRetrieval) - ask *retrievalmarket.Ask + watch func(client legacyretrievaltypes.RetrievalClient, gsupr *GraphsyncUnpaidRetrieval) + ask *legacyretrievaltypes.Ask noUnsealedCopy bool expectErr bool expectClientCancelEvent bool @@ -58,316 +28,308 @@ var clientCancelled = errors.New("client cancelled") var clientRejected = errors.New("client 
received reject response") func TestGS(t *testing.T) { + t.Skip("refactor tests to use boost client") //_ = logging.SetLogLevel("testgs", "debug") _ = logging.SetLogLevel("testgs", "info") //_ = logging.SetLogLevel("dt-impl", "debug") - missingCid := cid.MustParse("baguqeeraaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") - - testCases := []testCase{{ - name: "happy path", - }, { - name: "request missing payload cid", - reqPayloadCid: missingCid, - expectErr: true, - }, { - name: "request for piece with no unsealed sectors", - noUnsealedCopy: true, - expectErr: true, - expectRejection: "no unsealed piece", - }, { - name: "request for non-zero price per byte", - ask: &retrievalmarket.Ask{ - UnsealPrice: abi.NewTokenAmount(0), - PricePerByte: abi.NewTokenAmount(1), - }, - expectErr: true, - expectRejection: "ask price is non-zero", - }, { - // Note: we disregard the unseal price because we only serve deals - // with an unsealed piece, so the unseal price is irrelevant. - // Therefore the retrieval should succeed for non-zero unseal price. - name: "request for non-zero unseal price", - ask: &retrievalmarket.Ask{ - UnsealPrice: abi.NewTokenAmount(1), - PricePerByte: abi.NewTokenAmount(0), - }, - }, { - name: "cancel request after sending 2 blocks", - watch: func(client retrievalmarket.RetrievalClient, gsupr *GraphsyncUnpaidRetrieval) { - count := 0 - gsupr.outgoingBlockHook = func(state *retrievalState) { - count++ - if count == 2 { - tlog.Debug("cancelling client deal") - err := client.CancelDeal(state.mkts.ID) - require.NoError(t, err) - } - if count == 10 { - tlog.Warn("sending last block but client cancel hasn't arrived yet") - } - } - }, - expectClientCancelEvent: true, - expectProviderCancelEvent: true, - }, { - name: "provider cancel request after sending 2 blocks", - watch: func(client retrievalmarket.RetrievalClient, gsupr *GraphsyncUnpaidRetrieval) { - count := 0 - gsupr.outgoingBlockHook = func(state *retrievalState) { - count++ - if count == 2 { - tlog.Debug("provider cancelling client deal") - err := gsupr.CancelTransfer(context.TODO(), state.cs.transferID, &state.cs.recipient) - require.NoError(t, err) - } - if count == 10 { - tlog.Warn("sending last block but client cancel hasn't arrived yet") - } - } - }, - expectErr: true, - expectClientCancelEvent: true, - }, { - name: "provider cancel request after sending 2 blocks without peer id", - watch: func(client retrievalmarket.RetrievalClient, gsupr *GraphsyncUnpaidRetrieval) { - count := 0 - gsupr.outgoingBlockHook = func(state *retrievalState) { - count++ - if count == 2 { - tlog.Debug("provider cancelling client deal") - err := gsupr.CancelTransfer(context.TODO(), state.cs.transferID, nil) - require.NoError(t, err) - } - if count == 10 { - tlog.Warn("sending last block but client cancel hasn't arrived yet") - } - } - }, - expectErr: true, - expectClientCancelEvent: true, - }} - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - runRequestTest(t, tc) - }) - } -} - -func runRequestTest(t *testing.T, tc testCase) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - // Create a CAR file and set up mocks - testData := tut.NewLibp2pTestData(ctx, t) - - carRootCid, carFilePath := piecedirectory.CreateCarFile(t) - carFile, err := os.Open(carFilePath) - require.NoError(t, err) - defer carFile.Close() - - // Create a random CAR file - carReader, err := car.OpenReader(carFilePath) - require.NoError(t, err) - defer carReader.Close() - carv1Reader, err := 
carReader.DataReader() - require.NoError(t, err) - - // Any calls to get a reader over data should return a reader over the random CAR file - pr := piecedirectory.CreateMockPieceReader(t, carv1Reader) - - carv1Bytes, err := io.ReadAll(carv1Reader) - require.NoError(t, err) - carSize := len(carv1Bytes) - - maddr := address.TestAddress - pieceCid := tut.GenerateCids(1)[0] - sectorID := abi.SectorNumber(1) - offset := abi.PaddedPieceSize(0) - dealInfo := model.DealInfo{ - DealUuid: uuid.New().String(), - ChainDealID: abi.DealID(1), - MinerAddr: maddr, - SectorID: sectorID, - PieceOffset: offset, - PieceLength: abi.UnpaddedPieceSize(carSize).Padded(), - } - - askStore, err := askstore.NewAskStore(namespace.Wrap(testData.Ds1, datastore.NewKey("retrieval-ask")), datastore.NewKey("latest")) - require.NoError(t, err) - ask := &retrievalmarket.Ask{UnsealPrice: abi.NewTokenAmount(0), PricePerByte: abi.NewTokenAmount(0)} - if tc.ask != nil { - ask = tc.ask - } - err = askStore.SetAsk(ask) - require.NoError(t, err) - - cl := bdclientutil.NewTestStore(ctx) - defer cl.Close(ctx) - - pd := piecedirectory.NewPieceDirectory(cl, pr, 1) - pd.Start(ctx) - err = pd.AddDealForPiece(ctx, pieceCid, dealInfo) - require.NoError(t, err) - - vdeps := ValidationDeps{ - PieceDirectory: pd, - SectorAccessor: &mockSectorAccessor{ - unsealed: !tc.noUnsealedCopy, - }, - AskStore: askStore, - } - - // Create a blockstore over the CAR file blocks - carDataBs, err := pd.GetBlockstore(ctx, pieceCid) - require.NoError(t, err) - - // Wrap graphsync with the graphsync unpaid retrieval interceptor - linkSystem2 := storeutil.LinkSystemForBlockstore(carDataBs) - gs2 := graphsyncimpl.New(ctx, network.NewFromLibp2pHost(testData.Host2), linkSystem2) - gsupr, err := NewGraphsyncUnpaidRetrieval(testData.Host2.ID(), gs2, testData.DTNet2, vdeps, nil) - require.NoError(t, err) - - // Create a Graphsync transport and call SetEventHandler, which registers - // listeners for all the Graphsync hooks. 
- gsTransport := dtgstransport.NewTransport(testData.Host2.ID(), gsupr) - err = gsTransport.SetEventHandler(nil) - require.NoError(t, err) - - // Create the retrieval provider with the graphsync unpaid retrieval interceptor - paymentAddress := address.TestAddress2 - - gsupr.SubscribeToDataTransferEvents(func(event datatransfer.Event, channelState datatransfer.ChannelState) { - tlog.Debugf("prov dt: %s %s / %s", datatransfer.Events[event.Code], event.Message, datatransfer.Statuses[channelState.Status()]) - }) - err = gsupr.Start(ctx) - require.NoError(t, err) - - // Create a retrieval client - retrievalPeer := retrievalmarket.RetrievalPeer{ - Address: paymentAddress, - ID: testData.Host2.ID(), - } - retrievalClientNode := testnodes.NewTestRetrievalClientNode(testnodes.TestRetrievalClientNodeParams{}) - retrievalClientNode.ExpectKnownAddresses(retrievalPeer, nil) - client := createRetrievalClient(ctx, t, testData, retrievalClientNode) - tut.StartAndWaitForReady(ctx, t, client) - - if tc.watch != nil { - tc.watch(client, gsupr) - } - - // Watch for provider completion - providerResChan := make(chan error) - gsupr.SubscribeToMarketsEvents(func(event retrievalmarket.ProviderEvent, state retrievalmarket.ProviderDealState) { - tlog.Debugf("prov mkt: %s %s %s", retrievalmarket.ProviderEvents[event], state.Status.String(), state.Message) - switch event { - case retrievalmarket.ProviderEventComplete: - providerResChan <- nil - case retrievalmarket.ProviderEventCancelComplete: - providerResChan <- providerCancelled - case retrievalmarket.ProviderEventDataTransferError: - providerResChan <- errors.New(state.Message) - } - }) - - // Watch for client completion - clientResChan := make(chan error) - client.SubscribeToEvents(func(event retrievalmarket.ClientEvent, state retrievalmarket.ClientDealState) { - tlog.Debugf("clnt mkt: %s %s %s", event.String(), state.Status.String(), state.Message) - switch event { - case retrievalmarket.ClientEventComplete: - clientResChan <- nil - case retrievalmarket.ClientEventCancelComplete: - clientResChan <- clientCancelled - case retrievalmarket.ClientEventDealRejected: - clientResChan <- fmt.Errorf("%s :%w", state.Message, clientRejected) - case retrievalmarket.ClientEventDataTransferError: - clientResChan <- errors.New(state.Message) - } - }) - - // Retrieve the data - tlog.Infof("Retrieve cid %s from peer %s", carRootCid, retrievalPeer.ID) - // Use an explore-all but add unixfs-preload to make sure we have UnixFS - // ADL support wired up. 
- ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) - sel := ssb.ExploreInterpretAs("unixfs-preload", ssb.ExploreRecursive( - selector.RecursionLimitNone(), - ssb.ExploreAll(ssb.ExploreRecursiveEdge()), - )).Node() - params, err := retrievalmarket.NewParamsV1(abi.NewTokenAmount(0), 0, 0, sel, nil, abi.NewTokenAmount(0)) - require.NoError(t, err) - if tc.reqPayloadCid != cid.Undef { - carRootCid = tc.reqPayloadCid - } - _, err = client.Retrieve(ctx, 1, carRootCid, params, abi.NewTokenAmount(0), retrievalPeer, address.TestAddress, address.TestAddress2) - require.NoError(t, err) - - // Wait for provider completion - err = waitFor(ctx, t, providerResChan) - if tc.expectErr || tc.expectProviderCancelEvent { - require.Error(t, err) - if tc.expectProviderCancelEvent { - require.EqualError(t, err, providerCancelled.Error()) - } - } else { - require.NoError(t, err) - } - - // Wait for client completion - err = waitFor(ctx, t, clientResChan) - if tc.expectErr || tc.expectClientCancelEvent { - require.Error(t, err) - if tc.expectClientCancelEvent { - require.EqualError(t, err, clientCancelled.Error()) - } else if tc.expectRejection != "" { - require.ErrorContains(t, err, tc.expectRejection) - } - } else { - require.NoError(t, err) - } - - // final verification -- the server has no active graphsync requests - stats := gsupr.GraphExchange.Stats() - require.Equal(t, stats.IncomingRequests.Active, uint64(0)) + //missingCid := cid.MustParse("baguqeeraaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") + // + //testCases := []testCase{{ + // name: "happy path", + //}, { + // name: "request missing payload cid", + // reqPayloadCid: missingCid, + // expectErr: true, + //}, { + // name: "request for piece with no unsealed sectors", + // noUnsealedCopy: true, + // expectErr: true, + // expectRejection: "no unsealed piece", + //}, { + // name: "request for non-zero price per byte", + // ask: &legacyretrievaltypes.Ask{ + // UnsealPrice: abi.NewTokenAmount(0), + // PricePerByte: abi.NewTokenAmount(1), + // }, + // expectErr: true, + // expectRejection: "ask price is non-zero", + //}, { + // // Note: we disregard the unseal price because we only serve deals + // // with an unsealed piece, so the unseal price is irrelevant. + // // Therefore the retrieval should succeed for non-zero unseal price. 
+ // name: "request for non-zero unseal price", + // ask: &legacyretrievaltypes.Ask{ + // UnsealPrice: abi.NewTokenAmount(1), + // PricePerByte: abi.NewTokenAmount(0), + // }, + //}, { + // name: "cancel request after sending 2 blocks", + // watch: func(client legacyretrievaltypes.RetrievalClient, gsupr *GraphsyncUnpaidRetrieval) { + // count := 0 + // gsupr.outgoingBlockHook = func(state *retrievalState) { + // count++ + // if count == 2 { + // tlog.Debug("cancelling client deal") + // err := client.CancelDeal(state.mkts.ID) + // require.NoError(t, err) + // } + // if count == 10 { + // tlog.Warn("sending last block but client cancel hasn't arrived yet") + // } + // } + // }, + // expectClientCancelEvent: true, + // expectProviderCancelEvent: true, + //}, { + // name: "provider cancel request after sending 2 blocks", + // watch: func(client legacyretrievaltypes.RetrievalClient, gsupr *GraphsyncUnpaidRetrieval) { + // count := 0 + // gsupr.outgoingBlockHook = func(state *retrievalState) { + // count++ + // if count == 2 { + // tlog.Debug("provider cancelling client deal") + // err := gsupr.CancelTransfer(context.TODO(), state.cs.transferID, &state.cs.recipient) + // require.NoError(t, err) + // } + // if count == 10 { + // tlog.Warn("sending last block but client cancel hasn't arrived yet") + // } + // } + // }, + // expectErr: true, + // expectClientCancelEvent: true, + //}, { + // name: "provider cancel request after sending 2 blocks without peer id", + // watch: func(client legacyretrievaltypes.RetrievalClient, gsupr *GraphsyncUnpaidRetrieval) { + // count := 0 + // gsupr.outgoingBlockHook = func(state *retrievalState) { + // count++ + // if count == 2 { + // tlog.Debug("provider cancelling client deal") + // err := gsupr.CancelTransfer(context.TODO(), state.cs.transferID, nil) + // require.NoError(t, err) + // } + // if count == 10 { + // tlog.Warn("sending last block but client cancel hasn't arrived yet") + // } + // } + // }, + // expectErr: true, + // expectClientCancelEvent: true, + //}} + // + //for _, tc := range testCases { + // t.Run(tc.name, func(t *testing.T) { + // runRequestTest(t, tc) + // }) + //} } -func createRetrievalClient(ctx context.Context, t *testing.T, testData *tut.Libp2pTestData, retrievalClientNode *testnodes.TestRetrievalClientNode) retrievalmarket.RetrievalClient { - nw1 := rmnet.NewFromLibp2pHost(testData.Host1, rmnet.RetryParameters(0, 0, 0, 0)) - gs1 := graphsyncimpl.New(ctx, network.NewFromLibp2pHost(testData.Host1), testData.LinkSystem1) - dtTransport1 := dtgstransport.NewTransport(testData.Host1.ID(), gs1) - dt1, err := dtimpl.NewDataTransfer(testData.DTStore1, testData.DTNet1, dtTransport1) - require.NoError(t, err) - testutil.StartAndWaitForReady(ctx, t, dt1) - require.NoError(t, err) - clientDs := namespace.Wrap(testData.Ds1, datastore.NewKey("/retrievals/client")) - ba := tut.NewTestRetrievalBlockstoreAccessor() - client, err := retrievalimpl.NewClient(nw1, dt1, retrievalClientNode, &tut.TestPeerResolver{}, clientDs, ba) - require.NoError(t, err) - - dt1.SubscribeToEvents(func(event datatransfer.Event, channelState datatransfer.ChannelState) { - tlog.Debugf("client dt: %s %s / %s", datatransfer.Events[event.Code], event.Message, datatransfer.Statuses[channelState.Status()]) - }) - - return client -} - -func waitFor(ctx context.Context, t *testing.T, resChan chan error) error { - var err error - select { - case <-ctx.Done(): - require.Fail(t, "test timed out") - case err = <-resChan: - } - return err -} - -type mockSectorAccessor struct { - unsealed 
bool -} - -func (m *mockSectorAccessor) IsUnsealed(ctx context.Context, minerAddr address.Address, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) { - return m.unsealed, nil -} +//func runRequestTest(t *testing.T, tc testCase) { +// ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) +// defer cancel() +// +// // Create a CAR file and set up mocks +// testData := shared_tut.NewLibp2pTestData(ctx, t) +// +// carRootCid, carFilePath := piecedirectory.CreateCarFile(t) +// carFile, err := os.Open(carFilePath) +// require.NoError(t, err) +// defer carFile.Close() +// +// // Create a random CAR file +// carReader, err := car.OpenReader(carFilePath) +// require.NoError(t, err) +// defer carReader.Close() +// carv1Reader, err := carReader.DataReader() +// require.NoError(t, err) +// +// // Any calls to get a reader over data should return a reader over the random CAR file +// pr := piecedirectory.CreateMockPieceReader(t, carv1Reader) +// +// carv1Bytes, err := io.ReadAll(carv1Reader) +// require.NoError(t, err) +// carSize := len(carv1Bytes) +// +// maddr := address.TestAddress +// pieceCid := shared_tut.GenerateCids(1)[0] +// sectorID := abi.SectorNumber(1) +// offset := abi.PaddedPieceSize(0) +// dealInfo := model.DealInfo{ +// DealUuid: uuid.New().String(), +// ChainDealID: abi.DealID(1), +// MinerAddr: maddr, +// SectorID: sectorID, +// PieceOffset: offset, +// PieceLength: abi.UnpaddedPieceSize(carSize).Padded(), +// } +// +// cl := bdclientutil.NewTestStore(ctx) +// defer cl.Close(ctx) +// +// pd := piecedirectory.NewPieceDirectory(cl, pr, 1) +// pd.Start(ctx) +// err = pd.AddDealForPiece(ctx, pieceCid, dealInfo) +// require.NoError(t, err) +// +// vdeps := ValidationDeps{ +// PieceDirectory: pd, +// SectorAccessor: &mockSectorAccessor{ +// unsealed: !tc.noUnsealedCopy, +// }, +// AskStore: modules.NewRetrievalAskGetter(), +// } +// +// // Create a blockstore over the CAR file blocks +// carDataBs, err := pd.GetBlockstore(ctx, pieceCid) +// require.NoError(t, err) +// +// // Wrap graphsync with the graphsync unpaid retrieval interceptor +// linkSystem2 := storeutil.LinkSystemForBlockstore(carDataBs) +// gs2 := graphsyncimpl.New(ctx, network.NewFromLibp2pHost(testData.Host2), linkSystem2) +// gsupr, err := NewGraphsyncUnpaidRetrieval(testData.Host2.ID(), gs2, testData.DTNet2, vdeps) +// require.NoError(t, err) +// +// // Create a Graphsync transport and call SetEventHandler, which registers +// // listeners for all the Graphsync hooks. 
+// gsTransport := dtgstransport.NewTransport(testData.Host2.ID(), gsupr) +// err = gsTransport.SetEventHandler(nil) +// require.NoError(t, err) +// +// // Create the retrieval provider with the graphsync unpaid retrieval interceptor +// paymentAddress := address.TestAddress2 +// +// gsupr.SubscribeToDataTransferEvents(func(event datatransfer.Event, channelState datatransfer.ChannelState) { +// tlog.Debugf("prov dt: %s %s / %s", datatransfer.Events[event.Code], event.Message, datatransfer.Statuses[channelState.Status()]) +// }) +// err = gsupr.Start(ctx) +// require.NoError(t, err) +// +// // Create a retrieval client +// retrievalPeer := legacyretrievaltypes.RetrievalPeer{ +// Address: paymentAddress, +// ID: testData.Host2.ID(), +// } +// retrievalClientNode := testnodes.NewTestRetrievalClientNode(testnodes.TestRetrievalClientNodeParams{}) +// retrievalClientNode.ExpectKnownAddresses(retrievalPeer, nil) +// client := createRetrievalClient(ctx, t, testData, retrievalClientNode) +// shared_tut.StartAndWaitForReady(ctx, t, client) +// +// if tc.watch != nil { +// tc.watch(client, gsupr) +// } +// +// // Watch for provider completion +// providerResChan := make(chan error) +// gsupr.SubscribeToMarketsEvents(func(event legacyretrievaltypes.ProviderEvent, state legacyretrievaltypes.ProviderDealState) { +// tlog.Debugf("prov mkt: %s %s %s", legacyretrievaltypes.ProviderEvents[event], state.Status.String(), state.Message) +// switch event { +// case legacyretrievaltypes.ProviderEventComplete: +// providerResChan <- nil +// case legacyretrievaltypes.ProviderEventCancelComplete: +// providerResChan <- providerCancelled +// case legacyretrievaltypes.ProviderEventDataTransferError: +// providerResChan <- errors.New(state.Message) +// } +// }) +// +// // Watch for client completion +// clientResChan := make(chan error) +// client.SubscribeToEvents(func(event legacyretrievaltypes.ClientEvent, state legacyretrievaltypes.ClientDealState) { +// tlog.Debugf("clnt mkt: %s %s %s", event.String(), state.Status.String(), state.Message) +// switch event { +// case legacyretrievaltypes.ClientEventComplete: +// clientResChan <- nil +// case legacyretrievaltypes.ClientEventCancelComplete: +// clientResChan <- clientCancelled +// case legacyretrievaltypes.ClientEventDealRejected: +// clientResChan <- fmt.Errorf("%s :%w", state.Message, clientRejected) +// case legacyretrievaltypes.ClientEventDataTransferError: +// clientResChan <- errors.New(state.Message) +// } +// }) +// +// // Retrieve the data +// tlog.Infof("Retrieve cid %s from peer %s", carRootCid, retrievalPeer.ID) +// // Use an explore-all but add unixfs-preload to make sure we have UnixFS +// // ADL support wired up. 
+// ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) +// sel := ssb.ExploreInterpretAs("unixfs-preload", ssb.ExploreRecursive( +// selector.RecursionLimitNone(), +// ssb.ExploreAll(ssb.ExploreRecursiveEdge()), +// )).Node() +// params, err := legacyretrievaltypes.NewParamsV1(abi.NewTokenAmount(0), 0, 0, sel, nil, abi.NewTokenAmount(0)) +// require.NoError(t, err) +// if tc.reqPayloadCid != cid.Undef { +// carRootCid = tc.reqPayloadCid +// } +// _, err = client.Retrieve(ctx, 1, carRootCid, params, abi.NewTokenAmount(0), retrievalPeer, address.TestAddress, address.TestAddress2) +// require.NoError(t, err) +// +// // Wait for provider completion +// err = waitFor(ctx, t, providerResChan) +// if tc.expectErr || tc.expectProviderCancelEvent { +// require.Error(t, err) +// if tc.expectProviderCancelEvent { +// require.EqualError(t, err, providerCancelled.Error()) +// } +// } else { +// require.NoError(t, err) +// } +// +// // Wait for client completion +// err = waitFor(ctx, t, clientResChan) +// if tc.expectErr || tc.expectClientCancelEvent { +// require.Error(t, err) +// if tc.expectClientCancelEvent { +// require.EqualError(t, err, clientCancelled.Error()) +// } else if tc.expectRejection != "" { +// require.ErrorContains(t, err, tc.expectRejection) +// } +// } else { +// require.NoError(t, err) +// } +// +// // final verification -- the server has no active graphsync requests +// stats := gsupr.GraphExchange.Stats() +// require.Equal(t, stats.IncomingRequests.Active, uint64(0)) +//} +// +//func createRetrievalClient(ctx context.Context, t *testing.T, testData *shared_tut.Libp2pTestData, retrievalClientNode *testnodes.TestRetrievalClientNode) legacyretrievaltypes.RetrievalClient { +// nw1 := rmnet.NewFromLibp2pHost(testData.Host1, rmnet.RetryParameters(0, 0, 0, 0)) +// gs1 := graphsyncimpl.New(ctx, network.NewFromLibp2pHost(testData.Host1), testData.LinkSystem1) +// dtTransport1 := dtgstransport.NewTransport(testData.Host1.ID(), gs1) +// dt1, err := dtimpl.NewDataTransfer(testData.DTStore1, testData.DTNet1, dtTransport1) +// require.NoError(t, err) +// testutil.StartAndWaitForReady(ctx, t, dt1) +// require.NoError(t, err) +// clientDs := namespace.Wrap(testData.Ds1, datastore.NewKey("/retrievals/client")) +// ba := tut.NewTestRetrievalBlockstoreAccessor() +// client, err := retrievalimpl.NewClient(nw1, dt1, retrievalClientNode, &tut.TestPeerResolver{}, clientDs, ba) +// require.NoError(t, err) +// +// dt1.SubscribeToEvents(func(event datatransfer.Event, channelState datatransfer.ChannelState) { +// tlog.Debugf("client dt: %s %s / %s", datatransfer.Events[event.Code], event.Message, datatransfer.Statuses[channelState.Status()]) +// }) +// +// return client +//} +// +//func waitFor(ctx context.Context, t *testing.T, resChan chan error) error { +// var err error +// select { +// case <-ctx.Done(): +// require.Fail(t, "test timed out") +// case err = <-resChan: +// } +// return err +//} +// +//type mockSectorAccessor struct { +// unsealed bool +//} +// +//func (m *mockSectorAccessor) IsUnsealed(ctx context.Context, minerAddr address.Address, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) { +// return m.unsealed, nil +//} diff --git a/retrievalmarket/server/provider_pieces.go b/retrievalmarket/server/provider_pieces.go index b22ac0b6d..27c7f947a 100644 --- a/retrievalmarket/server/provider_pieces.go +++ b/retrievalmarket/server/provider_pieces.go @@ -3,6 +3,7 @@ package server import ( "context" "fmt" + 
"github.com/filecoin-project/boost/piecedirectory" "github.com/filecoin-project/boostd-data/model" "github.com/filecoin-project/go-state-types/abi" @@ -13,11 +14,6 @@ import ( "github.com/multiformats/go-multihash" ) -// This code is copied directly from -// https://github.com/filecoin-project/go-fil-markets/blob/955fd43fad7da2e68539c257f0c8199a6b0c2a4d/retrievalmarket/impl/provider_pieces.go#L1 -// TODO: Create a PR against go-fil-markets to make these methods public, -// so that we can import them from go-fil-markets instead of copying the code here. - // MaxIdentityCIDBytes is the largest identity CID as a PayloadCID that we are // willing to decode const MaxIdentityCIDBytes = 2 << 10 diff --git a/retrievalmarket/server/queryask.go b/retrievalmarket/server/queryask.go index 728d16a99..db7d70037 100644 --- a/retrievalmarket/server/queryask.go +++ b/retrievalmarket/server/queryask.go @@ -4,8 +4,11 @@ import ( "context" "errors" "fmt" - "github.com/filecoin-project/boost-gfm/retrievalmarket" + "time" + + "github.com/filecoin-project/boost/node/modules" "github.com/filecoin-project/boost/piecedirectory" + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" "github.com/filecoin-project/go-address" cborutil "github.com/filecoin-project/go-cbor-util" "github.com/filecoin-project/go-state-types/big" @@ -13,7 +16,6 @@ import ( "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" - "time" ) // The time limit to read a message from the client when the client opens a stream @@ -29,12 +31,12 @@ type QueryAskHandler struct { minerAddress address.Address pd *piecedirectory.PieceDirectory sa SectorAccessor - askStore AskGetter + askStore modules.RetrievalAskGetter full v1api.FullNode host host.Host } -func NewQueryAskHandler(host host.Host, maddr address.Address, pd *piecedirectory.PieceDirectory, sa SectorAccessor, askStore AskGetter, full v1api.FullNode) *QueryAskHandler { +func NewQueryAskHandler(host host.Host, maddr address.Address, pd *piecedirectory.PieceDirectory, sa SectorAccessor, askStore modules.RetrievalAskGetter, full v1api.FullNode) *QueryAskHandler { return &QueryAskHandler{ host: host, minerAddress: maddr, @@ -46,11 +48,11 @@ func NewQueryAskHandler(host host.Host, maddr address.Address, pd *piecedirector } func (qa *QueryAskHandler) Start() { - qa.host.SetStreamHandler(retrievalmarket.QueryProtocolID, qa.HandleQueryStream) + qa.host.SetStreamHandler(legacyretrievaltypes.QueryProtocolID, qa.HandleQueryStream) } func (qa *QueryAskHandler) Stop() { - qa.host.RemoveStreamHandler(retrievalmarket.QueryProtocolID) + qa.host.RemoveStreamHandler(legacyretrievaltypes.QueryProtocolID) } func (qa *QueryAskHandler) HandleQueryStream(stream network.Stream) { @@ -59,7 +61,7 @@ func (qa *QueryAskHandler) HandleQueryStream(stream network.Stream) { // Set a deadline on reading from the stream so it doesn't hang _ = stream.SetReadDeadline(time.Now().Add(providerReadDeadline)) - var query retrievalmarket.Query + var query legacyretrievaltypes.Query err := query.UnmarshalCBOR(stream) _ = stream.SetReadDeadline(time.Time{}) // Clear read deadline so conn doesn't get closed if err != nil { @@ -71,13 +73,13 @@ func (qa *QueryAskHandler) HandleQueryStream(stream network.Stream) { defer cancel() answer, err := qa.getQueryResponse(ctx, query) if err != nil { - status := retrievalmarket.QueryResponseError - if errors.Is(err, retrievalmarket.ErrNotFound) { - status = retrievalmarket.QueryResponseUnavailable + status := 
legacyretrievaltypes.QueryResponseError + if errors.Is(err, legacyretrievaltypes.ErrNotFound) { + status = legacyretrievaltypes.QueryResponseUnavailable } - answer = &retrievalmarket.QueryResponse{ + answer = &legacyretrievaltypes.QueryResponse{ Status: status, - PieceCIDFound: retrievalmarket.QueryItemUnavailable, + PieceCIDFound: legacyretrievaltypes.QueryItemUnavailable, PaymentAddress: qa.minerAddress, MinPricePerByte: big.Zero(), UnsealPrice: big.Zero(), @@ -94,7 +96,7 @@ func (qa *QueryAskHandler) HandleQueryStream(stream network.Stream) { } } -func (qa *QueryAskHandler) getQueryResponse(ctx context.Context, query retrievalmarket.Query) (*retrievalmarket.QueryResponse, error) { +func (qa *QueryAskHandler) getQueryResponse(ctx context.Context, query legacyretrievaltypes.Query) (*legacyretrievaltypes.QueryResponse, error) { // Fetch the payment address the client should send the payment to head, err := qa.full.ChainHead(ctx) if err != nil { @@ -120,10 +122,10 @@ func (qa *QueryAskHandler) getQueryResponse(ctx context.Context, query retrieval pieceInfo, _ := GetBestPieceInfoMatch(ctx, qa.sa, pieces, pieceCID) if !pieceInfo.PieceCID.Defined() { - if piecesErr != nil && !errors.Is(piecesErr, retrievalmarket.ErrNotFound) { + if piecesErr != nil && !errors.Is(piecesErr, legacyretrievaltypes.ErrNotFound) { return nil, fmt.Errorf("fetching piece to retrieve from: %w", piecesErr) } - return nil, fmt.Errorf("getting pieces for payload cid %s: %w", query.PayloadCID, retrievalmarket.ErrNotFound) + return nil, fmt.Errorf("getting pieces for payload cid %s: %w", query.PayloadCID, legacyretrievaltypes.ErrNotFound) } if len(pieceInfo.Deals) == 0 { @@ -143,11 +145,11 @@ func (qa *QueryAskHandler) getQueryResponse(ctx context.Context, query retrieval return nil, errors.New("no ask configured in ask-store") } - return &retrievalmarket.QueryResponse{ + return &legacyretrievaltypes.QueryResponse{ PaymentAddress: minerInfo.Worker, - Status: retrievalmarket.QueryResponseAvailable, + Status: legacyretrievaltypes.QueryResponseAvailable, Size: uint64(pieceInfo.Deals[0].PieceLength.Unpadded()), - PieceCIDFound: retrievalmarket.QueryItemAvailable, + PieceCIDFound: legacyretrievaltypes.QueryItemAvailable, MinPricePerByte: currAsk.PricePerByte, MaxPaymentInterval: currAsk.PaymentInterval, MaxPaymentIntervalIncrease: currAsk.PaymentIntervalIncrease, diff --git a/retrievalmarket/server/types.go b/retrievalmarket/server/types.go index 786064ac3..8e1791db3 100644 --- a/retrievalmarket/server/types.go +++ b/retrievalmarket/server/types.go @@ -2,15 +2,15 @@ package server import ( "context" - "github.com/filecoin-project/boost-gfm/retrievalmarket" + + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" ) -type AskGetter interface { - GetAsk() *retrievalmarket.Ask -} - type SectorAccessor interface { IsUnsealed(ctx context.Context, minerAddr address.Address, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) } + +// DealDecider is a function that makes a decision about whether to accept a deal +type DealDecider func(ctx context.Context, state legacyretrievaltypes.ProviderDealState) (bool, string, error) diff --git a/retrievalmarket/server/validation.go b/retrievalmarket/server/validation.go index d0116075a..662d1f712 100644 --- a/retrievalmarket/server/validation.go +++ b/retrievalmarket/server/validation.go @@ -6,9 +6,10 @@ import ( "errors" "fmt" - 
"github.com/filecoin-project/boost-gfm/retrievalmarket" - "github.com/filecoin-project/boost-gfm/retrievalmarket/migrations" - datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/boost/datatransfer" + + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes/migrations" "github.com/hannahhoward/go-pubsub" "github.com/ipfs/go-cid" "github.com/ipld/go-ipld-prime" @@ -39,7 +40,7 @@ func newRequestValidator(vdeps ValidationDeps) *requestValidator { // request to pull data or a new request created when the data transfer is // restarted (eg after a connection failure). func (rv *requestValidator) validatePullRequest(isRestart bool, receiver peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) (datatransfer.VoucherResult, error) { - proposal, ok := voucher.(*retrievalmarket.DealProposal) + proposal, ok := voucher.(*legacyretrievaltypes.DealProposal) var legacyProtocol bool if !ok { legacyProposal, ok := voucher.(*migrations.DealProposal0) @@ -51,7 +52,7 @@ func (rv *requestValidator) validatePullRequest(isRestart bool, receiver peer.ID legacyProtocol = true } response, err := rv.validatePull(receiver, proposal, legacyProtocol, baseCid, selector) - _ = rv.psub.Publish(retrievalmarket.ProviderValidationEvent{ + _ = rv.psub.Publish(legacyretrievaltypes.ProviderValidationEvent{ IsRestart: isRestart, Receiver: receiver, Proposal: proposal, @@ -72,16 +73,16 @@ func (rv *requestValidator) validatePullRequest(isRestart bool, receiver peer.ID return &response, err } -func (rv *requestValidator) validatePull(receiver peer.ID, proposal *retrievalmarket.DealProposal, legacyProtocol bool, baseCid cid.Cid, selector ipld.Node) (retrievalmarket.DealResponse, error) { - response := retrievalmarket.DealResponse{ +func (rv *requestValidator) validatePull(receiver peer.ID, proposal *legacyretrievaltypes.DealProposal, legacyProtocol bool, baseCid cid.Cid, selector ipld.Node) (legacyretrievaltypes.DealResponse, error) { + response := legacyretrievaltypes.DealResponse{ ID: proposal.ID, - Status: retrievalmarket.DealStatusAccepted, + Status: legacyretrievaltypes.DealStatusAccepted, } // Decide whether to accept the deal err := rv.acceptDeal(receiver, proposal, legacyProtocol, baseCid, selector) if err != nil { - response.Status = retrievalmarket.DealStatusRejected + response.Status = legacyretrievaltypes.DealStatusRejected response.Message = err.Error() return response, err } @@ -89,7 +90,7 @@ func (rv *requestValidator) validatePull(receiver peer.ID, proposal *retrievalma return response, nil } -func (rv *requestValidator) acceptDeal(receiver peer.ID, proposal *retrievalmarket.DealProposal, legacyProtocol bool, baseCid cid.Cid, selector ipld.Node) error { +func (rv *requestValidator) acceptDeal(receiver peer.ID, proposal *legacyretrievaltypes.DealProposal, legacyProtocol bool, baseCid cid.Cid, selector ipld.Node) error { // Check the proposal CID matches if proposal.PayloadCID != baseCid { return errors.New("incorrect CID for this proposal") @@ -112,7 +113,7 @@ func (rv *requestValidator) acceptDeal(receiver peer.ID, proposal *retrievalmark // Check if the piece is unsealed _, isUnsealed, err := rv.getPiece(proposal.PayloadCID, proposal.PieceCID) if err != nil { - if err == retrievalmarket.ErrNotFound { + if err == legacyretrievaltypes.ErrNotFound { return fmt.Errorf("there is no piece containing payload cid %s: %w", proposal.PayloadCID, err) } return err 
@@ -139,7 +140,7 @@ func (rv *requestValidator) acceptDeal(receiver peer.ID, proposal *retrievalmark // Check the deal filter if rv.DealDecider != nil { - state := retrievalmarket.ProviderDealState{ + state := legacyretrievaltypes.ProviderDealState{ DealProposal: *proposal, Receiver: receiver, LegacyProtocol: legacyProtocol, @@ -175,16 +176,16 @@ func (rv *requestValidator) getPiece(payloadCid cid.Cid, pieceCID *cid.Cid) (Pie return PieceInfo{}, false, fmt.Errorf("piece cid not found for payload cid %s", payloadCid.String()) } -func (rv *requestValidator) Subscribe(subscriber retrievalmarket.ProviderValidationSubscriber) retrievalmarket.Unsubscribe { - return retrievalmarket.Unsubscribe(rv.psub.Subscribe(subscriber)) +func (rv *requestValidator) Subscribe(subscriber ProviderValidationSubscriber) legacyretrievaltypes.Unsubscribe { + return legacyretrievaltypes.Unsubscribe(rv.psub.Subscribe(subscriber)) } func queryValidationDispatcher(evt pubsub.Event, subscriberFn pubsub.SubscriberFn) error { - e, ok := evt.(retrievalmarket.ProviderValidationEvent) + e, ok := evt.(legacyretrievaltypes.ProviderValidationEvent) if !ok { return errors.New("wrong type of event") } - cb, ok := subscriberFn.(retrievalmarket.ProviderValidationSubscriber) + cb, ok := subscriberFn.(ProviderValidationSubscriber) if !ok { return errors.New("wrong type of callback") } diff --git a/retrievalmarket/types/legacyretrievaltypes/dealstatus.go b/retrievalmarket/types/legacyretrievaltypes/dealstatus.go new file mode 100644 index 000000000..5070e2537 --- /dev/null +++ b/retrievalmarket/types/legacyretrievaltypes/dealstatus.go @@ -0,0 +1,186 @@ +package legacyretrievaltypes + +import "fmt" + +// DealStatus is the status of a retrieval deal returned by a provider +// in a DealResponse +type DealStatus uint64 + +const ( + // DealStatusNew is a deal that nothing has happened with yet + DealStatusNew DealStatus = iota + + // DealStatusUnsealing means the provider is unsealing data + DealStatusUnsealing + + // DealStatusUnsealed means the provider has finished unsealing data + DealStatusUnsealed + + // DealStatusWaitForAcceptance means we're waiting to hear back if the provider accepted our deal + DealStatusWaitForAcceptance + + // DealStatusPaymentChannelCreating is the status set while waiting for the + // payment channel creation to complete + DealStatusPaymentChannelCreating + + // DealStatusPaymentChannelAddingFunds is the status when we are waiting for funds + // to finish being sent to the payment channel + DealStatusPaymentChannelAddingFunds + + // DealStatusAccepted means a deal has been accepted by a provider + // and its is ready to proceed with retrieval + DealStatusAccepted + + // DealStatusFundsNeededUnseal means a deal has been accepted by a provider + // and payment is needed to unseal the data + DealStatusFundsNeededUnseal + + // DealStatusFailing indicates something went wrong during a retrieval, + // and we are cleaning up before terminating with an error + DealStatusFailing + + // DealStatusRejected indicates the provider rejected a client's deal proposal + // for some reason + DealStatusRejected + + // DealStatusFundsNeeded indicates the provider needs a payment voucher to + // continue processing the deal + DealStatusFundsNeeded + + // DealStatusSendFunds indicates the client is now going to send funds because we reached the threshold of the last payment + DealStatusSendFunds + + // DealStatusSendFundsLastPayment indicates the client is now going to send final funds because + // we reached the threshold of the 
final payment
+	DealStatusSendFundsLastPayment
+
+	// DealStatusOngoing indicates the provider is continuing to process a deal
+	DealStatusOngoing
+
+	// DealStatusFundsNeededLastPayment indicates the provider needs a payment voucher
+	// in order to complete a deal
+	DealStatusFundsNeededLastPayment
+
+	// DealStatusCompleted indicates a deal is complete
+	DealStatusCompleted
+
+	// DealStatusDealNotFound indicates an update was received for a deal that could
+	// not be identified
+	DealStatusDealNotFound
+
+	// DealStatusErrored indicates a deal has terminated in an error
+	DealStatusErrored
+
+	// DealStatusBlocksComplete indicates that all blocks have been processed for the piece
+	DealStatusBlocksComplete
+
+	// DealStatusFinalizing means the last payment has been received and
+	// we are just confirming the deal is complete
+	DealStatusFinalizing
+
+	// DealStatusCompleting is just an in-between state to perform final cleanup of
+	// complete deals
+	DealStatusCompleting
+
+	// DealStatusCheckComplete is used for when the provider completes without a last payment
+	// requested cycle, to verify we have received all blocks
+	DealStatusCheckComplete
+
+	// DealStatusCheckFunds means we are looking at the state of funding for the channel to determine
+	// if more money is incoming
+	DealStatusCheckFunds
+
+	// DealStatusInsufficientFunds indicates we have depleted funds for the retrieval payment channel
+	// - we can resume after funds are added
+	DealStatusInsufficientFunds
+
+	// DealStatusPaymentChannelAllocatingLane is the status when we are making a lane for this channel
+	DealStatusPaymentChannelAllocatingLane
+
+	// DealStatusCancelling means we are cancelling an in-progress deal
+	DealStatusCancelling
+
+	// DealStatusCancelled means a deal has been cancelled
+	DealStatusCancelled
+
+	// DealStatusRetryLegacy means we're attempting the deal proposal for a second time using the legacy datatype
+	DealStatusRetryLegacy
+
+	// DealStatusWaitForAcceptanceLegacy means we're waiting to hear the results on the legacy protocol
+	DealStatusWaitForAcceptanceLegacy
+
+	// DealStatusClientWaitingForLastBlocks means that the provider has told
+	// the client that all blocks were sent for the deal, and the client is
+	// waiting for the last blocks to arrive. This should only happen when
+	// the deal price per byte is zero (if it's not zero the provider asks
+	// for final payment after sending the last blocks).
+ DealStatusClientWaitingForLastBlocks + + // DealStatusPaymentChannelAddingInitialFunds means that a payment channel + // exists from an earlier deal between client and provider, but we need + // to add funds to the channel for this particular deal + DealStatusPaymentChannelAddingInitialFunds + + // DealStatusErroring means that there was an error and we need to + // do some cleanup before moving to the error state + DealStatusErroring + + // DealStatusRejecting means that the deal was rejected and we need to do + // some cleanup before moving to the rejected state + DealStatusRejecting + + // DealStatusDealNotFoundCleanup means that the deal was not found and we + // need to do some cleanup before moving to the not found state + DealStatusDealNotFoundCleanup + + // DealStatusFinalizingBlockstore means that all blocks have been received, + // and the blockstore is being finalized + DealStatusFinalizingBlockstore +) + +// DealStatuses maps deal status to a human readable representation +var DealStatuses = map[DealStatus]string{ + DealStatusNew: "DealStatusNew", + DealStatusUnsealing: "DealStatusUnsealing", + DealStatusUnsealed: "DealStatusUnsealed", + DealStatusWaitForAcceptance: "DealStatusWaitForAcceptance", + DealStatusPaymentChannelCreating: "DealStatusPaymentChannelCreating", + DealStatusPaymentChannelAddingFunds: "DealStatusPaymentChannelAddingFunds", + DealStatusAccepted: "DealStatusAccepted", + DealStatusFundsNeededUnseal: "DealStatusFundsNeededUnseal", + DealStatusFailing: "DealStatusFailing", + DealStatusRejected: "DealStatusRejected", + DealStatusFundsNeeded: "DealStatusFundsNeeded", + DealStatusSendFunds: "DealStatusSendFunds", + DealStatusSendFundsLastPayment: "DealStatusSendFundsLastPayment", + DealStatusOngoing: "DealStatusOngoing", + DealStatusFundsNeededLastPayment: "DealStatusFundsNeededLastPayment", + DealStatusCompleted: "DealStatusCompleted", + DealStatusDealNotFound: "DealStatusDealNotFound", + DealStatusErrored: "DealStatusErrored", + DealStatusBlocksComplete: "DealStatusBlocksComplete", + DealStatusFinalizing: "DealStatusFinalizing", + DealStatusCompleting: "DealStatusCompleting", + DealStatusCheckComplete: "DealStatusCheckComplete", + DealStatusCheckFunds: "DealStatusCheckFunds", + DealStatusInsufficientFunds: "DealStatusInsufficientFunds", + DealStatusPaymentChannelAllocatingLane: "DealStatusPaymentChannelAllocatingLane", + DealStatusCancelling: "DealStatusCancelling", + DealStatusCancelled: "DealStatusCancelled", + DealStatusRetryLegacy: "DealStatusRetryLegacy", + DealStatusWaitForAcceptanceLegacy: "DealStatusWaitForAcceptanceLegacy", + DealStatusClientWaitingForLastBlocks: "DealStatusWaitingForLastBlocks", + DealStatusPaymentChannelAddingInitialFunds: "DealStatusPaymentChannelAddingInitialFunds", + DealStatusErroring: "DealStatusErroring", + DealStatusRejecting: "DealStatusRejecting", + DealStatusDealNotFoundCleanup: "DealStatusDealNotFoundCleanup", + DealStatusFinalizingBlockstore: "DealStatusFinalizingBlockstore", +} + +func (s DealStatus) String() string { + str, ok := DealStatuses[s] + if ok { + return str + } + return fmt.Sprintf("DealStatusUnknown - %d", s) +} diff --git a/retrievalmarket/types/legacyretrievaltypes/events.go b/retrievalmarket/types/legacyretrievaltypes/events.go new file mode 100644 index 000000000..a1a86079e --- /dev/null +++ b/retrievalmarket/types/legacyretrievaltypes/events.go @@ -0,0 +1,281 @@ +package legacyretrievaltypes + +import "fmt" + +// ClientEvent is an event that occurs in a deal lifecycle on the client +type ClientEvent 
uint64
+
+const (
+	// ClientEventOpen indicates a deal was initiated
+	ClientEventOpen ClientEvent = iota
+
+	// ClientEventWriteDealProposalErrored means there was a network error writing a deal proposal
+	ClientEventWriteDealProposalErrored
+
+	// ClientEventDealProposed means a deal was successfully sent to a miner
+	ClientEventDealProposed
+
+	// ClientEventDealRejected means a deal was rejected by the provider
+	ClientEventDealRejected
+
+	// ClientEventDealNotFound means a provider could not find a piece for a deal
+	ClientEventDealNotFound
+
+	// ClientEventDealAccepted means a provider accepted a deal
+	ClientEventDealAccepted
+
+	// ClientEventProviderCancelled means a provider has sent a message to cancel a deal
+	ClientEventProviderCancelled
+
+	// ClientEventUnknownResponseReceived means a client received a response it doesn't
+	// understand from the provider
+	ClientEventUnknownResponseReceived
+
+	// ClientEventPaymentChannelErrored means there was a failure creating a payment channel
+	ClientEventPaymentChannelErrored
+
+	// ClientEventAllocateLaneErrored means there was a failure creating a lane in a payment channel
+	ClientEventAllocateLaneErrored
+
+	// ClientEventPaymentChannelCreateInitiated means we are waiting for a message to
+	// create a payment channel to appear on chain
+	ClientEventPaymentChannelCreateInitiated
+
+	// ClientEventPaymentChannelReady means the newly created payment channel is ready for the
+	// deal to resume
+	ClientEventPaymentChannelReady
+
+	// ClientEventPaymentChannelAddingFunds means we are waiting for funds to be
+	// added to a payment channel
+	ClientEventPaymentChannelAddingFunds
+
+	// ClientEventPaymentChannelAddFundsErrored means that adding funds to the payment channel
+	// failed
+	ClientEventPaymentChannelAddFundsErrored
+
+	// ClientEventLastPaymentRequested indicates the provider requested a final payment
+	ClientEventLastPaymentRequested
+
+	// ClientEventAllBlocksReceived indicates the provider has sent all blocks
+	ClientEventAllBlocksReceived
+
+	// ClientEventPaymentRequested indicates the provider requested a payment
+	ClientEventPaymentRequested
+
+	// ClientEventUnsealPaymentRequested indicates the provider requested a payment for unsealing the sector
+	ClientEventUnsealPaymentRequested
+
+	// ClientEventBlocksReceived indicates the provider has sent blocks
+	ClientEventBlocksReceived
+
+	// ClientEventSendFunds emits when we reach the threshold to send the next payment
+	ClientEventSendFunds
+
+	// ClientEventFundsExpended indicates a deal has run out of funds in the payment channel
+	// forcing the client to add more funds to continue the deal
+	ClientEventFundsExpended // when totalFunds is expended
+
+	// ClientEventBadPaymentRequested indicates the provider asked for funds
+	// in a way that does not match the terms of the deal
+	ClientEventBadPaymentRequested
+
+	// ClientEventCreateVoucherFailed indicates an error happened creating a payment voucher
+	ClientEventCreateVoucherFailed
+
+	// ClientEventWriteDealPaymentErrored indicates a network error trying to write a payment
+	ClientEventWriteDealPaymentErrored
+
+	// ClientEventPaymentSent indicates a payment was sent to the provider
+	ClientEventPaymentSent
+
+	// ClientEventComplete is fired when the provider sends a message
+	// indicating that a deal has completed
+	ClientEventComplete
+
+	// ClientEventDataTransferError emits when something goes wrong at the data transfer level
+	ClientEventDataTransferError
+
+	// ClientEventCancelComplete happens when a deal cancellation is transmitted to the provider
+	ClientEventCancelComplete
+
+	// ClientEventEarlyTermination indicates a provider sent a deal complete without sending all data
+	ClientEventEarlyTermination
+
+	// ClientEventCompleteVerified means that a provider completed without requesting a final payment but
+	// we verified we received all data
+	ClientEventCompleteVerified
+
+	// ClientEventLaneAllocated is called when a lane is allocated
+	ClientEventLaneAllocated
+
+	// ClientEventVoucherShortfall means we tried to create a voucher but did not have enough funds in the channel
+	// to create it
+	ClientEventVoucherShortfall
+
+	// ClientEventRecheckFunds runs when an external caller indicates there may be new funds in a payment channel
+	ClientEventRecheckFunds
+
+	// ClientEventCancel runs when a user cancels a deal
+	ClientEventCancel
+
+	// ClientEventWaitForLastBlocks is fired when the provider has told
+	// the client that all blocks were sent for the deal, and the client is
+	// waiting for the last blocks to arrive
+	ClientEventWaitForLastBlocks
+
+	// ClientEventPaymentChannelSkip is fired when the total deal price is zero
+	// so there's no need to set up a payment channel
+	ClientEventPaymentChannelSkip
+
+	// ClientEventPaymentNotSent indicates that payment was requested, but no
+	// payment was actually due, so a voucher was not sent to the provider
+	ClientEventPaymentNotSent
+
+	// ClientEventBlockstoreFinalized is fired when the blockstore has been
+	// finalized after receiving all blocks
+	ClientEventBlockstoreFinalized
+
+	// ClientEventFinalizeBlockstoreErrored is fired when there is an error
+	// finalizing the blockstore
+	ClientEventFinalizeBlockstoreErrored
+)
+
+// ClientEvents is a human readable map of client event name -> event description
+var ClientEvents = map[ClientEvent]string{
+	ClientEventOpen: "ClientEventOpen",
+	ClientEventPaymentChannelErrored: "ClientEventPaymentChannelErrored",
+	ClientEventDealProposed: "ClientEventDealProposed",
+	ClientEventAllocateLaneErrored: "ClientEventAllocateLaneErrored",
+	ClientEventPaymentChannelCreateInitiated: "ClientEventPaymentChannelCreateInitiated",
+	ClientEventPaymentChannelReady: "ClientEventPaymentChannelReady",
+	ClientEventPaymentChannelAddingFunds: "ClientEventPaymentChannelAddingFunds",
+	ClientEventPaymentChannelAddFundsErrored: "ClientEventPaymentChannelAddFundsErrored",
+	ClientEventWriteDealProposalErrored: "ClientEventWriteDealProposalErrored",
+	ClientEventDealRejected: "ClientEventDealRejected",
+	ClientEventDealNotFound: "ClientEventDealNotFound",
+	ClientEventDealAccepted: "ClientEventDealAccepted",
+	ClientEventProviderCancelled: "ClientEventProviderCancelled",
+	ClientEventUnknownResponseReceived: "ClientEventUnknownResponseReceived",
+	ClientEventLastPaymentRequested: "ClientEventLastPaymentRequested",
+	ClientEventAllBlocksReceived: "ClientEventAllBlocksReceived",
+	ClientEventPaymentRequested: "ClientEventPaymentRequested",
+	ClientEventUnsealPaymentRequested: "ClientEventUnsealPaymentRequested",
+	ClientEventBlocksReceived: "ClientEventBlocksReceived",
+	ClientEventSendFunds: "ClientEventSendFunds",
+	ClientEventFundsExpended: "ClientEventFundsExpended",
+	ClientEventBadPaymentRequested: "ClientEventBadPaymentRequested",
+	ClientEventCreateVoucherFailed: "ClientEventCreateVoucherFailed",
+	ClientEventWriteDealPaymentErrored: "ClientEventWriteDealPaymentErrored",
+	ClientEventPaymentSent: "ClientEventPaymentSent",
+	ClientEventDataTransferError: "ClientEventDataTransferError",
+	ClientEventComplete: "ClientEventComplete",
+	ClientEventCancelComplete: "ClientEventCancelComplete",
+	ClientEventEarlyTermination: "ClientEventEarlyTermination",
+	ClientEventCompleteVerified: "ClientEventCompleteVerified",
+	ClientEventLaneAllocated: "ClientEventLaneAllocated",
+	ClientEventVoucherShortfall: "ClientEventVoucherShortfall",
+	ClientEventRecheckFunds: "ClientEventRecheckFunds",
+	ClientEventCancel: "ClientEventCancel",
+	ClientEventWaitForLastBlocks: "ClientEventWaitForLastBlocks",
+	ClientEventPaymentChannelSkip: "ClientEventPaymentChannelSkip",
+	ClientEventPaymentNotSent: "ClientEventPaymentNotSent",
+	ClientEventBlockstoreFinalized: "ClientEventBlockstoreFinalized",
+	ClientEventFinalizeBlockstoreErrored: "ClientEventFinalizeBlockstoreErrored",
+}
+
+func (e ClientEvent) String() string {
+	s, ok := ClientEvents[e]
+	if ok {
+		return s
+	}
+	return fmt.Sprintf("ClientEventUnknown: %d", e)
+}
+
+// ProviderEvent is an event that occurs in a deal lifecycle on the provider
+type ProviderEvent uint64
+
+const (
+	// ProviderEventOpen indicates a new deal was received from a client
+	ProviderEventOpen ProviderEvent = iota
+
+	// ProviderEventDealNotFound happens when the provider cannot find the piece for the
+	// deal proposed by the client
+	ProviderEventDealNotFound
+
+	// ProviderEventDealRejected happens when a provider rejects a deal proposed
+	// by the client
+	ProviderEventDealRejected
+
+	// ProviderEventDealAccepted happens when a provider accepts a deal
+	ProviderEventDealAccepted
+
+	// ProviderEventBlockSent happens when the provider reads another block
+	// in the piece
+	ProviderEventBlockSent
+
+	// ProviderEventBlocksCompleted happens when the provider reads the last block
+	// in the piece
+	ProviderEventBlocksCompleted
+
+	// ProviderEventPaymentRequested happens when a provider asks for payment from
+	// a client for blocks sent
+	ProviderEventPaymentRequested
+
+	// ProviderEventSaveVoucherFailed happens when an attempt to save a payment
+	// voucher fails
+	ProviderEventSaveVoucherFailed
+
+	// ProviderEventPartialPaymentReceived happens when a provider receives and processes
+	// a payment that is less than what was requested to proceed with the deal
+	ProviderEventPartialPaymentReceived
+
+	// ProviderEventPaymentReceived happens when a provider receives a payment
+	// and resumes processing a deal
+	ProviderEventPaymentReceived
+
+	// ProviderEventComplete indicates a retrieval deal was completed for a client
+	ProviderEventComplete
+
+	// ProviderEventUnsealError emits when something goes wrong while unsealing data
+	ProviderEventUnsealError
+
+	// ProviderEventUnsealComplete emits when the unsealing process is done
+	ProviderEventUnsealComplete
+
+	// ProviderEventDataTransferError emits when something goes wrong at the data transfer level
+	ProviderEventDataTransferError
+
+	// ProviderEventCancelComplete happens when a deal cancellation is transmitted to the provider
+	ProviderEventCancelComplete
+
+	// ProviderEventCleanupComplete happens when a deal is finished cleaning up and enters a complete state
+	ProviderEventCleanupComplete
+
+	// ProviderEventMultiStoreError occurs when an error happens attempting to operate on the multistore
+	ProviderEventMultiStoreError
+
+	// ProviderEventClientCancelled happens when the provider gets a cancel message from the client's data transfer
+	ProviderEventClientCancelled
+)
+
+// ProviderEvents is a human readable map of provider event name -> event description
+var ProviderEvents =
map[ProviderEvent]string{ + ProviderEventOpen: "ProviderEventOpen", + ProviderEventDealNotFound: "ProviderEventDealNotFound", + ProviderEventDealRejected: "ProviderEventDealRejected", + ProviderEventDealAccepted: "ProviderEventDealAccepted", + ProviderEventBlockSent: "ProviderEventBlockSent", + ProviderEventBlocksCompleted: "ProviderEventBlocksCompleted", + ProviderEventPaymentRequested: "ProviderEventPaymentRequested", + ProviderEventSaveVoucherFailed: "ProviderEventSaveVoucherFailed", + ProviderEventPartialPaymentReceived: "ProviderEventPartialPaymentReceived", + ProviderEventPaymentReceived: "ProviderEventPaymentReceived", + ProviderEventComplete: "ProviderEventComplete", + ProviderEventUnsealError: "ProviderEventUnsealError", + ProviderEventUnsealComplete: "ProviderEventUnsealComplete", + ProviderEventDataTransferError: "ProviderEventDataTransferError", + ProviderEventCancelComplete: "ProviderEventCancelComplete", + ProviderEventCleanupComplete: "ProviderEventCleanupComplete", + ProviderEventMultiStoreError: "ProviderEventMultiStoreError", + ProviderEventClientCancelled: "ProviderEventClientCancelled", +} diff --git a/retrievalmarket/types/legacyretrievaltypes/migrations/maptypes/maptypes.go b/retrievalmarket/types/legacyretrievaltypes/migrations/maptypes/maptypes.go new file mode 100644 index 000000000..95908ef71 --- /dev/null +++ b/retrievalmarket/types/legacyretrievaltypes/migrations/maptypes/maptypes.go @@ -0,0 +1,55 @@ +package maptypes + +import ( + "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/markets/piecestore" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" +) + +//go:generate cbor-gen-for --map-encoding ClientDealState1 ProviderDealState1 + +// Version 1 of the ClientDealState +type ClientDealState1 struct { + legacyretrievaltypes.DealProposal + StoreID *uint64 + ChannelID datatransfer.ChannelID + LastPaymentRequested bool + AllBlocksReceived bool + TotalFunds abi.TokenAmount + ClientWallet address.Address + MinerWallet address.Address + PaymentInfo *legacyretrievaltypes.PaymentInfo + Status legacyretrievaltypes.DealStatus + Sender peer.ID + TotalReceived uint64 + Message string + BytesPaidFor uint64 + CurrentInterval uint64 + PaymentRequested abi.TokenAmount + FundsSpent abi.TokenAmount + UnsealFundsPaid abi.TokenAmount + WaitMsgCID *cid.Cid + VoucherShortfall abi.TokenAmount + LegacyProtocol bool +} + +// Version 1 of the ProviderDealState +type ProviderDealState1 struct { + legacyretrievaltypes.DealProposal + StoreID uint64 + ChannelID datatransfer.ChannelID + PieceInfo *piecestore.PieceInfo + Status legacyretrievaltypes.DealStatus + Receiver peer.ID + TotalSent uint64 + FundsReceived abi.TokenAmount + Message string + CurrentInterval uint64 + LegacyProtocol bool +} diff --git a/retrievalmarket/types/legacyretrievaltypes/migrations/maptypes/maptypes_cbor_gen.go b/retrievalmarket/types/legacyretrievaltypes/migrations/maptypes/maptypes_cbor_gen.go new file mode 100644 index 000000000..ff738788c --- /dev/null +++ b/retrievalmarket/types/legacyretrievaltypes/migrations/maptypes/maptypes_cbor_gen.go @@ -0,0 +1,1142 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
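The maptypes_cbor_gen.go file that follows is generated from the //go:generate directive in maptypes.go above (cbor-gen-for --map-encoding ClientDealState1 ProviderDealState1), so the version-1 deal states are encoded as CBOR maps keyed by field name, as the generated MarshalCBOR below shows. A rough sketch of an equivalent standalone generator is given here; the use of cbor-gen's WriteMapEncodersToFile helper is an assumption about the tooling, and only the package and output path mirror this patch:

// A hypothetical gen/main.go; not part of this patch.
package main

import (
	"log"

	"github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes/migrations/maptypes"
	cborgen "github.com/whyrusleeping/cbor-gen"
)

func main() {
	// WriteMapEncodersToFile emits MarshalCBOR/UnmarshalCBOR methods that encode
	// each struct as a CBOR map keyed by field name, matching the generated file below.
	err := cborgen.WriteMapEncodersToFile(
		"retrievalmarket/types/legacyretrievaltypes/migrations/maptypes/maptypes_cbor_gen.go",
		"maptypes",
		maptypes.ClientDealState1{},
		maptypes.ProviderDealState1{},
	)
	if err != nil {
		log.Fatal(err)
	}
}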
+ +package maptypes + +import ( + "fmt" + "io" + "math" + "sort" + + "github.com/filecoin-project/boost/markets/piecestore" + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +func (t *ClientDealState1) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{181}); err != nil { + return err + } + + // t.Sender (peer.ID) (string) + if len("Sender") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Sender\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Sender"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Sender")); err != nil { + return err + } + + if len(t.Sender) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Sender was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Sender))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Sender)); err != nil { + return err + } + + // t.Status (legacyretrievaltypes.DealStatus) (uint64) + if len("Status") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Status\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Status"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Status")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Message)); err != nil { + return err + } + + // t.StoreID (uint64) (uint64) + if len("StoreID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"StoreID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("StoreID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("StoreID")); err != nil { + return err + } + + if t.StoreID == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(*t.StoreID)); err != nil { + return err + } + } + + // t.ChannelID (datatransfer.ChannelID) (struct) + if len("ChannelID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ChannelID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ChannelID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("ChannelID")); err != nil { + return err + } + + if err := t.ChannelID.MarshalCBOR(cw); err != nil { + return err + } + + // t.FundsSpent (big.Int) (struct) + if len("FundsSpent") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FundsSpent\" was too long") + } + + if err := 
cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FundsSpent"))); err != nil { + return err + } + if _, err := cw.WriteString(string("FundsSpent")); err != nil { + return err + } + + if err := t.FundsSpent.MarshalCBOR(cw); err != nil { + return err + } + + // t.TotalFunds (big.Int) (struct) + if len("TotalFunds") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TotalFunds\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("TotalFunds"))); err != nil { + return err + } + if _, err := cw.WriteString(string("TotalFunds")); err != nil { + return err + } + + if err := t.TotalFunds.MarshalCBOR(cw); err != nil { + return err + } + + // t.WaitMsgCID (cid.Cid) (struct) + if len("WaitMsgCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"WaitMsgCID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("WaitMsgCID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("WaitMsgCID")); err != nil { + return err + } + + if t.WaitMsgCID == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.WaitMsgCID); err != nil { + return xerrors.Errorf("failed to write cid field t.WaitMsgCID: %w", err) + } + } + + // t.MinerWallet (address.Address) (struct) + if len("MinerWallet") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MinerWallet\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("MinerWallet"))); err != nil { + return err + } + if _, err := cw.WriteString(string("MinerWallet")); err != nil { + return err + } + + if err := t.MinerWallet.MarshalCBOR(cw); err != nil { + return err + } + + // t.PaymentInfo (legacyretrievaltypes.PaymentInfo) (struct) + if len("PaymentInfo") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentInfo\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PaymentInfo"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PaymentInfo")); err != nil { + return err + } + + if err := t.PaymentInfo.MarshalCBOR(cw); err != nil { + return err + } + + // t.BytesPaidFor (uint64) (uint64) + if len("BytesPaidFor") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"BytesPaidFor\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("BytesPaidFor"))); err != nil { + return err + } + if _, err := cw.WriteString(string("BytesPaidFor")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.BytesPaidFor)); err != nil { + return err + } + + // t.ClientWallet (address.Address) (struct) + if len("ClientWallet") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ClientWallet\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ClientWallet"))); err != nil { + return err + } + if _, err := cw.WriteString(string("ClientWallet")); err != nil { + return err + } + + if err := t.ClientWallet.MarshalCBOR(cw); err != nil { + return err + } + + // t.DealProposal (legacyretrievaltypes.DealProposal) (struct) + if len("DealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealProposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealProposal"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealProposal")); err != nil { + return err + } + + if err := t.DealProposal.MarshalCBOR(cw); err != nil { + 
return err + } + + // t.TotalReceived (uint64) (uint64) + if len("TotalReceived") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TotalReceived\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("TotalReceived"))); err != nil { + return err + } + if _, err := cw.WriteString(string("TotalReceived")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TotalReceived)); err != nil { + return err + } + + // t.LegacyProtocol (bool) (bool) + if len("LegacyProtocol") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"LegacyProtocol\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("LegacyProtocol"))); err != nil { + return err + } + if _, err := cw.WriteString(string("LegacyProtocol")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.LegacyProtocol); err != nil { + return err + } + + // t.CurrentInterval (uint64) (uint64) + if len("CurrentInterval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CurrentInterval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("CurrentInterval"))); err != nil { + return err + } + if _, err := cw.WriteString(string("CurrentInterval")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.CurrentInterval)); err != nil { + return err + } + + // t.UnsealFundsPaid (big.Int) (struct) + if len("UnsealFundsPaid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"UnsealFundsPaid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("UnsealFundsPaid"))); err != nil { + return err + } + if _, err := cw.WriteString(string("UnsealFundsPaid")); err != nil { + return err + } + + if err := t.UnsealFundsPaid.MarshalCBOR(cw); err != nil { + return err + } + + // t.PaymentRequested (big.Int) (struct) + if len("PaymentRequested") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentRequested\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PaymentRequested"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PaymentRequested")); err != nil { + return err + } + + if err := t.PaymentRequested.MarshalCBOR(cw); err != nil { + return err + } + + // t.VoucherShortfall (big.Int) (struct) + if len("VoucherShortfall") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"VoucherShortfall\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("VoucherShortfall"))); err != nil { + return err + } + if _, err := cw.WriteString(string("VoucherShortfall")); err != nil { + return err + } + + if err := t.VoucherShortfall.MarshalCBOR(cw); err != nil { + return err + } + + // t.AllBlocksReceived (bool) (bool) + if len("AllBlocksReceived") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"AllBlocksReceived\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("AllBlocksReceived"))); err != nil { + return err + } + if _, err := cw.WriteString(string("AllBlocksReceived")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.AllBlocksReceived); err != nil { + return err + } + + // t.LastPaymentRequested (bool) (bool) + if len("LastPaymentRequested") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"LastPaymentRequested\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("LastPaymentRequested"))); 
err != nil { + return err + } + if _, err := cw.WriteString(string("LastPaymentRequested")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.LastPaymentRequested); err != nil { + return err + } + return nil +} + +func (t *ClientDealState1) UnmarshalCBOR(r io.Reader) (err error) { + *t = ClientDealState1{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("ClientDealState1: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Sender (peer.ID) (string) + case "Sender": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Sender = peer.ID(sval) + } + // t.Status (legacyretrievaltypes.DealStatus) (uint64) + case "Status": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = legacyretrievaltypes.DealStatus(extra) + + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.StoreID (uint64) (uint64) + case "StoreID": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + typed := uint64(extra) + t.StoreID = &typed + } + + } + // t.ChannelID (datatransfer.ChannelID) (struct) + case "ChannelID": + + { + + if err := t.ChannelID.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ChannelID: %w", err) + } + + } + // t.FundsSpent (big.Int) (struct) + case "FundsSpent": + + { + + if err := t.FundsSpent.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.FundsSpent: %w", err) + } + + } + // t.TotalFunds (big.Int) (struct) + case "TotalFunds": + + { + + if err := t.TotalFunds.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.TotalFunds: %w", err) + } + + } + // t.WaitMsgCID (cid.Cid) (struct) + case "WaitMsgCID": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.WaitMsgCID: %w", err) + } + + t.WaitMsgCID = &c + } + + } + // t.MinerWallet (address.Address) (struct) + case "MinerWallet": + + { + + if err := t.MinerWallet.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.MinerWallet: %w", err) + } + + } + // t.PaymentInfo (legacyretrievaltypes.PaymentInfo) (struct) + case "PaymentInfo": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.PaymentInfo = new(legacyretrievaltypes.PaymentInfo) + if err := t.PaymentInfo.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentInfo pointer: %w", err) + } + } + + } + // 
t.BytesPaidFor (uint64) (uint64) + case "BytesPaidFor": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.BytesPaidFor = uint64(extra) + + } + // t.ClientWallet (address.Address) (struct) + case "ClientWallet": + + { + + if err := t.ClientWallet.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ClientWallet: %w", err) + } + + } + // t.DealProposal (legacyretrievaltypes.DealProposal) (struct) + case "DealProposal": + + { + + if err := t.DealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealProposal: %w", err) + } + + } + // t.TotalReceived (uint64) (uint64) + case "TotalReceived": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.TotalReceived = uint64(extra) + + } + // t.LegacyProtocol (bool) (bool) + case "LegacyProtocol": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.LegacyProtocol = false + case 21: + t.LegacyProtocol = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.CurrentInterval (uint64) (uint64) + case "CurrentInterval": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.CurrentInterval = uint64(extra) + + } + // t.UnsealFundsPaid (big.Int) (struct) + case "UnsealFundsPaid": + + { + + if err := t.UnsealFundsPaid.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.UnsealFundsPaid: %w", err) + } + + } + // t.PaymentRequested (big.Int) (struct) + case "PaymentRequested": + + { + + if err := t.PaymentRequested.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentRequested: %w", err) + } + + } + // t.VoucherShortfall (big.Int) (struct) + case "VoucherShortfall": + + { + + if err := t.VoucherShortfall.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.VoucherShortfall: %w", err) + } + + } + // t.AllBlocksReceived (bool) (bool) + case "AllBlocksReceived": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.AllBlocksReceived = false + case 21: + t.AllBlocksReceived = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.LastPaymentRequested (bool) (bool) + case "LastPaymentRequested": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.LastPaymentRequested = false + case 21: + t.LastPaymentRequested = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *ProviderDealState1) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{171}); err != nil { + return err + } + + // t.Status 
(legacyretrievaltypes.DealStatus) (uint64) + if len("Status") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Status\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Status"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Status")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Message)); err != nil { + return err + } + + // t.StoreID (uint64) (uint64) + if len("StoreID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"StoreID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("StoreID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("StoreID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StoreID)); err != nil { + return err + } + + // t.Receiver (peer.ID) (string) + if len("Receiver") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Receiver\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Receiver"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Receiver")); err != nil { + return err + } + + if len(t.Receiver) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Receiver was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Receiver))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Receiver)); err != nil { + return err + } + + // t.ChannelID (datatransfer.ChannelID) (struct) + if len("ChannelID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ChannelID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ChannelID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("ChannelID")); err != nil { + return err + } + + if err := t.ChannelID.MarshalCBOR(cw); err != nil { + return err + } + + // t.PieceInfo (piecestore.PieceInfo) (struct) + if len("PieceInfo") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceInfo\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PieceInfo"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PieceInfo")); err != nil { + return err + } + + if err := t.PieceInfo.MarshalCBOR(cw); err != nil { + return err + } + + // t.TotalSent (uint64) (uint64) + if len("TotalSent") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TotalSent\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("TotalSent"))); err != nil { + return err + } + if _, err := cw.WriteString(string("TotalSent")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TotalSent)); err != nil { + return err + } + + // t.DealProposal 
(legacyretrievaltypes.DealProposal) (struct) + if len("DealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealProposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealProposal"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealProposal")); err != nil { + return err + } + + if err := t.DealProposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.FundsReceived (big.Int) (struct) + if len("FundsReceived") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FundsReceived\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FundsReceived"))); err != nil { + return err + } + if _, err := cw.WriteString(string("FundsReceived")); err != nil { + return err + } + + if err := t.FundsReceived.MarshalCBOR(cw); err != nil { + return err + } + + // t.LegacyProtocol (bool) (bool) + if len("LegacyProtocol") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"LegacyProtocol\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("LegacyProtocol"))); err != nil { + return err + } + if _, err := cw.WriteString(string("LegacyProtocol")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.LegacyProtocol); err != nil { + return err + } + + // t.CurrentInterval (uint64) (uint64) + if len("CurrentInterval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CurrentInterval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("CurrentInterval"))); err != nil { + return err + } + if _, err := cw.WriteString(string("CurrentInterval")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.CurrentInterval)); err != nil { + return err + } + + return nil +} + +func (t *ProviderDealState1) UnmarshalCBOR(r io.Reader) (err error) { + *t = ProviderDealState1{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("ProviderDealState1: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Status (legacyretrievaltypes.DealStatus) (uint64) + case "Status": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = legacyretrievaltypes.DealStatus(extra) + + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.StoreID (uint64) (uint64) + case "StoreID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.StoreID = uint64(extra) + + } + // t.Receiver (peer.ID) (string) + case "Receiver": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Receiver = peer.ID(sval) + } + // t.ChannelID (datatransfer.ChannelID) (struct) + case "ChannelID": + + { + + if err := t.ChannelID.UnmarshalCBOR(cr); err != nil { + return 
xerrors.Errorf("unmarshaling t.ChannelID: %w", err) + } + + } + // t.PieceInfo (piecestore.PieceInfo) (struct) + case "PieceInfo": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.PieceInfo = new(piecestore.PieceInfo) + if err := t.PieceInfo.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PieceInfo pointer: %w", err) + } + } + + } + // t.TotalSent (uint64) (uint64) + case "TotalSent": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.TotalSent = uint64(extra) + + } + // t.DealProposal (legacyretrievaltypes.DealProposal) (struct) + case "DealProposal": + + { + + if err := t.DealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealProposal: %w", err) + } + + } + // t.FundsReceived (big.Int) (struct) + case "FundsReceived": + + { + + if err := t.FundsReceived.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.FundsReceived: %w", err) + } + + } + // t.LegacyProtocol (bool) (bool) + case "LegacyProtocol": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.LegacyProtocol = false + case 21: + t.LegacyProtocol = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.CurrentInterval (uint64) (uint64) + case "CurrentInterval": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.CurrentInterval = uint64(extra) + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} diff --git a/retrievalmarket/types/legacyretrievaltypes/migrations/migrations.go b/retrievalmarket/types/legacyretrievaltypes/migrations/migrations.go new file mode 100644 index 000000000..85bfdb9e2 --- /dev/null +++ b/retrievalmarket/types/legacyretrievaltypes/migrations/migrations.go @@ -0,0 +1,386 @@ +package migrations + +import ( + "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/markets/piecestore" + piecemigrations "github.com/filecoin-project/boost/markets/piecestore/migrations" + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-address" + versioning "github.com/filecoin-project/go-ds-versioning/pkg" + "github.com/filecoin-project/go-ds-versioning/pkg/versioned" + "github.com/filecoin-project/go-state-types/abi" + paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych" + + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes/migrations/maptypes" +) + +//go:generate cbor-gen-for Query0 QueryResponse0 DealProposal0 DealResponse0 Params0 QueryParams0 DealPayment0 ClientDealState0 ProviderDealState0 PaymentInfo0 RetrievalPeer0 Ask0 + +// PaymentInfo0 is version 0 of PaymentInfo +type PaymentInfo0 struct { + PayCh address.Address + Lane uint64 +} + +// ClientDealState0 is version 0 of ClientDealState +type ClientDealState0 struct { + DealProposal0 + StoreID *uint64 + ChannelID 
datatransfer.ChannelID + LastPaymentRequested bool + AllBlocksReceived bool + TotalFunds abi.TokenAmount + ClientWallet address.Address + MinerWallet address.Address + PaymentInfo *PaymentInfo0 + Status legacyretrievaltypes.DealStatus + Sender peer.ID + TotalReceived uint64 + Message string + BytesPaidFor uint64 + CurrentInterval uint64 + PaymentRequested abi.TokenAmount + FundsSpent abi.TokenAmount + UnsealFundsPaid abi.TokenAmount + WaitMsgCID *cid.Cid // the CID of any message the client deal is waiting for + VoucherShortfall abi.TokenAmount +} + +// ProviderDealState0 is version 0 of ProviderDealState +type ProviderDealState0 struct { + DealProposal0 + StoreID uint64 + ChannelID datatransfer.ChannelID + PieceInfo *piecemigrations.PieceInfo0 + Status legacyretrievaltypes.DealStatus + Receiver peer.ID + TotalSent uint64 + FundsReceived abi.TokenAmount + Message string + CurrentInterval uint64 +} + +// RetrievalPeer0 is version 0 of RetrievalPeer +type RetrievalPeer0 struct { + Address address.Address + ID peer.ID // optional + PieceCID *cid.Cid +} + +// QueryParams0 is version 0 of QueryParams +type QueryParams0 struct { + PieceCID *cid.Cid // optional, query if miner has this cid in this piece. some miners may not be able to respond. + //Selector ipld.Node // optional, query if miner has this cid in this piece. some miners may not be able to respond. + //MaxPricePerByte abi.TokenAmount // optional, tell miner uninterested if more expensive than this + //MinPaymentInterval uint64 // optional, tell miner uninterested unless payment interval is greater than this + //MinPaymentIntervalIncrease uint64 // optional, tell miner uninterested unless payment interval increase is greater than this +} + +// Query0 is version 0 of Query +type Query0 struct { + PayloadCID cid.Cid // V0 + QueryParams0 // V1 +} + +// QueryResponse0 is version 0 of QueryResponse +type QueryResponse0 struct { + Status legacyretrievaltypes.QueryResponseStatus + PieceCIDFound legacyretrievaltypes.QueryItemStatus // V1 - if a PieceCID was requested, the result + //SelectorFound QueryItemStatus // V1 - if a Selector was requested, the result + + Size uint64 // Total size of piece in bytes + //ExpectedPayloadSize uint64 // V1 - optional, if PayloadCID + selector are specified and miner knows, can offer an expected size + + PaymentAddress address.Address // address to send funds to -- may be different than miner addr + MinPricePerByte abi.TokenAmount + MaxPaymentInterval uint64 + MaxPaymentIntervalIncrease uint64 + Message string + UnsealPrice abi.TokenAmount +} + +// Params0 is version 0 of Params +type Params0 struct { + Selector *cbg.Deferred // V1 + PieceCID *cid.Cid + PricePerByte abi.TokenAmount + PaymentInterval uint64 // when to request payment + PaymentIntervalIncrease uint64 + UnsealPrice abi.TokenAmount +} + +// DealProposal0 is version 0 of DealProposal +type DealProposal0 struct { + PayloadCID cid.Cid + ID legacyretrievaltypes.DealID + Params0 +} + +// Type method makes DealProposal0 usable as a voucher +func (dp *DealProposal0) Type() datatransfer.TypeIdentifier { + return "RetrievalDealProposal" +} + +// DealResponse0 is version 0 of DealResponse +type DealResponse0 struct { + Status legacyretrievaltypes.DealStatus + ID legacyretrievaltypes.DealID + + // payment required to proceed + PaymentOwed abi.TokenAmount + + Message string +} + +// Type method makes DealResponse0 usable as a voucher result +func (dr *DealResponse0) Type() datatransfer.TypeIdentifier { + return "RetrievalDealResponse" +} + +// DealPayment0 
is version 0 of DealPayment +type DealPayment0 struct { + ID legacyretrievaltypes.DealID + PaymentChannel address.Address + PaymentVoucher *paychtypes.SignedVoucher +} + +// Type method makes DealPayment0 usable as a voucher +func (dr *DealPayment0) Type() datatransfer.TypeIdentifier { + return "RetrievalDealPayment" +} + +// Ask0 is version 0 of Ask +type Ask0 struct { + PricePerByte abi.TokenAmount + UnsealPrice abi.TokenAmount + PaymentInterval uint64 + PaymentIntervalIncrease uint64 +} + +// MigrateQueryParams0To1 migrates tuple encoded query params to map encoded query params +func MigrateQueryParams0To1(oldParams QueryParams0) legacyretrievaltypes.QueryParams { + return legacyretrievaltypes.QueryParams{ + PieceCID: oldParams.PieceCID, + } +} + +// MigrateQuery0To1 migrates tuple encoded query to map encoded query +func MigrateQuery0To1(oldQuery Query0) legacyretrievaltypes.Query { + return legacyretrievaltypes.Query{ + PayloadCID: oldQuery.PayloadCID, + QueryParams: MigrateQueryParams0To1(oldQuery.QueryParams0), + } +} + +// MigrateQueryResponse0To1 migrates tuple encoded query response to map encoded query response +func MigrateQueryResponse0To1(oldQr QueryResponse0) legacyretrievaltypes.QueryResponse { + return legacyretrievaltypes.QueryResponse{ + Status: oldQr.Status, + PieceCIDFound: oldQr.PieceCIDFound, + Size: oldQr.Size, + PaymentAddress: oldQr.PaymentAddress, + MinPricePerByte: oldQr.MinPricePerByte, + MaxPaymentInterval: oldQr.MaxPaymentInterval, + MaxPaymentIntervalIncrease: oldQr.MaxPaymentIntervalIncrease, + Message: oldQr.Message, + UnsealPrice: oldQr.UnsealPrice, + } +} + +// MigrateParams0To1 migrates tuple encoded deal params to map encoded deal params +func MigrateParams0To1(oldParams Params0) legacyretrievaltypes.Params { + return legacyretrievaltypes.Params{ + Selector: oldParams.Selector, + PieceCID: oldParams.PieceCID, + PricePerByte: oldParams.PricePerByte, + PaymentInterval: oldParams.PaymentInterval, + PaymentIntervalIncrease: oldParams.PaymentIntervalIncrease, + UnsealPrice: oldParams.UnsealPrice, + } +} + +// MigrateDealPayment0To1 migrates a tuple encoded DealPayment to a map +// encoded deal payment +func MigrateDealPayment0To1(oldDp DealPayment0) legacyretrievaltypes.DealPayment { + return legacyretrievaltypes.DealPayment{ + ID: oldDp.ID, + PaymentChannel: oldDp.PaymentChannel, + PaymentVoucher: oldDp.PaymentVoucher, + } +} + +// MigrateDealProposal0To1 migrates a tuple encoded DealProposal to a map +// encoded deal proposal +func MigrateDealProposal0To1(oldDp DealProposal0) legacyretrievaltypes.DealProposal { + return legacyretrievaltypes.DealProposal{ + PayloadCID: oldDp.PayloadCID, + ID: oldDp.ID, + Params: MigrateParams0To1(oldDp.Params0), + } +} + +// MigrateDealResponse0To1 migrates a tuple encoded DealResponse to a map +// encoded deal response +func MigrateDealResponse0To1(oldDr DealResponse0) legacyretrievaltypes.DealResponse { + return legacyretrievaltypes.DealResponse{ + Status: oldDr.Status, + ID: oldDr.ID, + PaymentOwed: oldDr.PaymentOwed, + Message: oldDr.Message, + } +} + +// MigratePaymentInfo0To1 migrates an optional payment info tuple encoded struct +// to a map encoded struct +func MigratePaymentInfo0To1(oldPi *PaymentInfo0) *legacyretrievaltypes.PaymentInfo { + if oldPi == nil { + return nil + } + return &legacyretrievaltypes.PaymentInfo{ + PayCh: oldPi.PayCh, + Lane: oldPi.Lane, + } +} + +// MigrateClientDealState0To1 migrates a tuple encoded deal state to a map encoded deal state +func MigrateClientDealState0To1(oldDs 
*ClientDealState0) (*maptypes.ClientDealState1, error) { + return &maptypes.ClientDealState1{ + DealProposal: MigrateDealProposal0To1(oldDs.DealProposal0), + StoreID: oldDs.StoreID, + ChannelID: oldDs.ChannelID, + LastPaymentRequested: oldDs.LastPaymentRequested, + AllBlocksReceived: oldDs.AllBlocksReceived, + TotalFunds: oldDs.TotalFunds, + ClientWallet: oldDs.ClientWallet, + MinerWallet: oldDs.MinerWallet, + PaymentInfo: MigratePaymentInfo0To1(oldDs.PaymentInfo), + Status: oldDs.Status, + Sender: oldDs.Sender, + TotalReceived: oldDs.TotalReceived, + Message: oldDs.Message, + BytesPaidFor: oldDs.BytesPaidFor, + CurrentInterval: oldDs.CurrentInterval, + PaymentRequested: oldDs.PaymentRequested, + FundsSpent: oldDs.FundsSpent, + UnsealFundsPaid: oldDs.UnsealFundsPaid, + WaitMsgCID: oldDs.WaitMsgCID, + VoucherShortfall: oldDs.VoucherShortfall, + LegacyProtocol: true, + }, nil +} + +// MigrateClientDealState1To2 migrates from v1 to v2 of a ClientDealState. +// The difference is that in v2 the ChannelID is a pointer, because the +// ChannelID is not set until the data transfer has started, so it should +// initially be nil. +func MigrateClientDealState1To2(oldDs *maptypes.ClientDealState1) (*legacyretrievaltypes.ClientDealState, error) { + var chid *datatransfer.ChannelID + if oldDs.ChannelID.Initiator != "" && oldDs.ChannelID.Responder != "" { + chid = &oldDs.ChannelID + } + return &legacyretrievaltypes.ClientDealState{ + DealProposal: oldDs.DealProposal, + StoreID: oldDs.StoreID, + ChannelID: chid, + LastPaymentRequested: oldDs.LastPaymentRequested, + AllBlocksReceived: oldDs.AllBlocksReceived, + TotalFunds: oldDs.TotalFunds, + ClientWallet: oldDs.ClientWallet, + MinerWallet: oldDs.MinerWallet, + PaymentInfo: oldDs.PaymentInfo, + Status: oldDs.Status, + Sender: oldDs.Sender, + TotalReceived: oldDs.TotalReceived, + Message: oldDs.Message, + BytesPaidFor: oldDs.BytesPaidFor, + CurrentInterval: oldDs.CurrentInterval, + PaymentRequested: oldDs.PaymentRequested, + FundsSpent: oldDs.FundsSpent, + UnsealFundsPaid: oldDs.UnsealFundsPaid, + WaitMsgCID: oldDs.WaitMsgCID, + VoucherShortfall: oldDs.VoucherShortfall, + LegacyProtocol: true, + }, nil +} + +// MigrateProviderDealState0To1 migrates a tuple encoded deal state to a map encoded deal state +func MigrateProviderDealState0To1(oldDs *ProviderDealState0) (*maptypes.ProviderDealState1, error) { + var pieceInfo *piecestore.PieceInfo + var err error + if oldDs.PieceInfo != nil { + pieceInfo, err = piecemigrations.MigratePieceInfo0To1(oldDs.PieceInfo) + if err != nil { + return nil, err + } + } + return &maptypes.ProviderDealState1{ + DealProposal: MigrateDealProposal0To1(oldDs.DealProposal0), + StoreID: oldDs.StoreID, + ChannelID: oldDs.ChannelID, + PieceInfo: pieceInfo, + Status: oldDs.Status, + Receiver: oldDs.Receiver, + TotalSent: oldDs.TotalSent, + FundsReceived: oldDs.FundsReceived, + Message: oldDs.Message, + CurrentInterval: oldDs.CurrentInterval, + LegacyProtocol: true, + }, nil +} + +// MigrateProviderDealState1To2 migrates from v1 to v2 of a +// ProviderDealState. +// The difference is that in v2 the ChannelID is a pointer, because the +// ChannelID is not set until the data transfer has started, so it should +// initially be nil. 
+func MigrateProviderDealState1To2(oldDs *maptypes.ProviderDealState1) (*legacyretrievaltypes.ProviderDealState, error) { + var chid *datatransfer.ChannelID + if oldDs.ChannelID.Initiator != "" && oldDs.ChannelID.Responder != "" { + chid = &oldDs.ChannelID + } + return &legacyretrievaltypes.ProviderDealState{ + DealProposal: oldDs.DealProposal, + StoreID: oldDs.StoreID, + ChannelID: chid, + PieceInfo: oldDs.PieceInfo, + Status: oldDs.Status, + Receiver: oldDs.Receiver, + TotalSent: oldDs.TotalSent, + FundsReceived: oldDs.FundsReceived, + Message: oldDs.Message, + CurrentInterval: oldDs.CurrentInterval, + LegacyProtocol: oldDs.LegacyProtocol, + }, nil +} + +// MigrateAsk0To1 migrates a tuple encoded ask to a map encoded ask +func MigrateAsk0To1(oldAsk *Ask0) (*legacyretrievaltypes.Ask, error) { + return &legacyretrievaltypes.Ask{ + PricePerByte: oldAsk.PricePerByte, + UnsealPrice: oldAsk.UnsealPrice, + PaymentInterval: oldAsk.PaymentInterval, + PaymentIntervalIncrease: oldAsk.PaymentIntervalIncrease, + }, nil +} + +// ClientMigrations are migrations for the client's store of retrieval deals +var ClientMigrations = versioned.BuilderList{ + versioned.NewVersionedBuilder(MigrateClientDealState0To1, "1"), + versioned.NewVersionedBuilder(MigrateClientDealState1To2, "2").OldVersion("1"), +} + +// ProviderMigrations are migrations for the provider's store of retrieval deals +var ProviderMigrations = versioned.BuilderList{ + versioned.NewVersionedBuilder(MigrateProviderDealState0To1, "1"). + FilterKeys([]string{"/retrieval-ask", "/retrieval-ask/latest", "/retrieval-ask/1/latest", "/retrieval-ask/versions/current"}), + versioned.NewVersionedBuilder(MigrateProviderDealState1To2, "2").OldVersion("1"), +} + +// AskMigrations are migrations for the provider's retrieval ask +var AskMigrations = versioned.BuilderList{ + versioned.NewVersionedBuilder(MigrateAsk0To1, versioning.VersionKey("1")), +} diff --git a/retrievalmarket/types/legacyretrievaltypes/migrations/migrations_cbor_gen.go b/retrievalmarket/types/legacyretrievaltypes/migrations/migrations_cbor_gen.go new file mode 100644 index 000000000..95e6d6601 --- /dev/null +++ b/retrievalmarket/types/legacyretrievaltypes/migrations/migrations_cbor_gen.go @@ -0,0 +1,1815 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
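+// Illustrative note, not produced by cbor-gen: it is added here only to explain
+// the magic header bytes that appear throughout the generated marshalers in this
+// patch. A CBOR header byte packs the major type in its top three bits and, for
+// counts under 24, the element count in its low five bits. The map-encoded (v1)
+// structs above therefore open with 0xa0+N, e.g. 0xa0+21 = 181 for the 21-field
+// ClientDealState1 and 0xa0+11 = 171 for the 11-field ProviderDealState1, while
+// the tuple-encoded (v0) structs below open with 0x80+N, e.g.
+// lengthBufQuery0 = []byte{130} = 0x82 for a 2-element array and
+// lengthBufClientDealState0 = []byte{148} = 0x94 for 20 fields.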
+ +package migrations + +import ( + "fmt" + "io" + "math" + "sort" + + "github.com/filecoin-project/boost/markets/piecestore/migrations" + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" + "github.com/filecoin-project/go-state-types/builtin/v8/paych" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +var lengthBufQuery0 = []byte{130} + +func (t *Query0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufQuery0); err != nil { + return err + } + + // t.PayloadCID (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.PayloadCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PayloadCID: %w", err) + } + + // t.QueryParams0 (migrations.QueryParams0) (struct) + if err := t.QueryParams0.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *Query0) UnmarshalCBOR(r io.Reader) (err error) { + *t = Query0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.PayloadCID (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PayloadCID: %w", err) + } + + t.PayloadCID = c + + } + // t.QueryParams0 (migrations.QueryParams0) (struct) + + { + + if err := t.QueryParams0.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.QueryParams0: %w", err) + } + + } + return nil +} + +var lengthBufQueryResponse0 = []byte{137} + +func (t *QueryResponse0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufQueryResponse0); err != nil { + return err + } + + // t.Status (legacyretrievaltypes.QueryResponseStatus) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.PieceCIDFound (legacyretrievaltypes.QueryItemStatus) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PieceCIDFound)); err != nil { + return err + } + + // t.Size (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Size)); err != nil { + return err + } + + // t.PaymentAddress (address.Address) (struct) + if err := t.PaymentAddress.MarshalCBOR(cw); err != nil { + return err + } + + // t.MinPricePerByte (big.Int) (struct) + if err := t.MinPricePerByte.MarshalCBOR(cw); err != nil { + return err + } + + // t.MaxPaymentInterval (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.MaxPaymentInterval)); err != nil { + return err + } + + // t.MaxPaymentIntervalIncrease (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.MaxPaymentIntervalIncrease)); err != nil { + return err + } + + // t.Message (string) (string) + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return 
err + } + if _, err := cw.WriteString(string(t.Message)); err != nil { + return err + } + + // t.UnsealPrice (big.Int) (struct) + if err := t.UnsealPrice.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *QueryResponse0) UnmarshalCBOR(r io.Reader) (err error) { + *t = QueryResponse0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 9 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Status (legacyretrievaltypes.QueryResponseStatus) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = legacyretrievaltypes.QueryResponseStatus(extra) + + } + // t.PieceCIDFound (legacyretrievaltypes.QueryItemStatus) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PieceCIDFound = legacyretrievaltypes.QueryItemStatus(extra) + + } + // t.Size (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Size = uint64(extra) + + } + // t.PaymentAddress (address.Address) (struct) + + { + + if err := t.PaymentAddress.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentAddress: %w", err) + } + + } + // t.MinPricePerByte (big.Int) (struct) + + { + + if err := t.MinPricePerByte.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.MinPricePerByte: %w", err) + } + + } + // t.MaxPaymentInterval (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.MaxPaymentInterval = uint64(extra) + + } + // t.MaxPaymentIntervalIncrease (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.MaxPaymentIntervalIncrease = uint64(extra) + + } + // t.Message (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.UnsealPrice (big.Int) (struct) + + { + + if err := t.UnsealPrice.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.UnsealPrice: %w", err) + } + + } + return nil +} + +var lengthBufDealProposal0 = []byte{131} + +func (t *DealProposal0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufDealProposal0); err != nil { + return err + } + + // t.PayloadCID (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.PayloadCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PayloadCID: %w", err) + } + + // t.ID (legacyretrievaltypes.DealID) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ID)); err != nil { + return err + } + + // t.Params0 (migrations.Params0) (struct) + if err := t.Params0.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *DealProposal0) UnmarshalCBOR(r io.Reader) (err 
error) { + *t = DealProposal0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.PayloadCID (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PayloadCID: %w", err) + } + + t.PayloadCID = c + + } + // t.ID (legacyretrievaltypes.DealID) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.ID = legacyretrievaltypes.DealID(extra) + + } + // t.Params0 (migrations.Params0) (struct) + + { + + if err := t.Params0.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Params0: %w", err) + } + + } + return nil +} + +var lengthBufDealResponse0 = []byte{132} + +func (t *DealResponse0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufDealResponse0); err != nil { + return err + } + + // t.Status (legacyretrievaltypes.DealStatus) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.ID (legacyretrievaltypes.DealID) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ID)); err != nil { + return err + } + + // t.PaymentOwed (big.Int) (struct) + if err := t.PaymentOwed.MarshalCBOR(cw); err != nil { + return err + } + + // t.Message (string) (string) + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Message)); err != nil { + return err + } + return nil +} + +func (t *DealResponse0) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealResponse0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 4 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Status (legacyretrievaltypes.DealStatus) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = legacyretrievaltypes.DealStatus(extra) + + } + // t.ID (legacyretrievaltypes.DealID) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.ID = legacyretrievaltypes.DealID(extra) + + } + // t.PaymentOwed (big.Int) (struct) + + { + + if err := t.PaymentOwed.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentOwed: %w", err) + } + + } + // t.Message (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + return nil +} + +var lengthBufParams0 = []byte{134} + +func (t *Params0) MarshalCBOR(w io.Writer) error { + if t == nil { + 
_, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufParams0); err != nil { + return err + } + + // t.Selector (typegen.Deferred) (struct) + if err := t.Selector.MarshalCBOR(cw); err != nil { + return err + } + + // t.PieceCID (cid.Cid) (struct) + + if t.PieceCID == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PieceCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) + } + } + + // t.PricePerByte (big.Int) (struct) + if err := t.PricePerByte.MarshalCBOR(cw); err != nil { + return err + } + + // t.PaymentInterval (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PaymentInterval)); err != nil { + return err + } + + // t.PaymentIntervalIncrease (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PaymentIntervalIncrease)); err != nil { + return err + } + + // t.UnsealPrice (big.Int) (struct) + if err := t.UnsealPrice.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *Params0) UnmarshalCBOR(r io.Reader) (err error) { + *t = Params0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 6 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Selector (typegen.Deferred) (struct) + + { + + t.Selector = new(cbg.Deferred) + + if err := t.Selector.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("failed to read deferred field: %w", err) + } + } + // t.PieceCID (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } + + t.PieceCID = &c + } + + } + // t.PricePerByte (big.Int) (struct) + + { + + if err := t.PricePerByte.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PricePerByte: %w", err) + } + + } + // t.PaymentInterval (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PaymentInterval = uint64(extra) + + } + // t.PaymentIntervalIncrease (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PaymentIntervalIncrease = uint64(extra) + + } + // t.UnsealPrice (big.Int) (struct) + + { + + if err := t.UnsealPrice.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.UnsealPrice: %w", err) + } + + } + return nil +} + +var lengthBufQueryParams0 = []byte{129} + +func (t *QueryParams0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufQueryParams0); err != nil { + return err + } + + // t.PieceCID (cid.Cid) (struct) + + if t.PieceCID == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PieceCID); err != nil { + return xerrors.Errorf("failed to write cid field 
t.PieceCID: %w", err) + } + } + + return nil +} + +func (t *QueryParams0) UnmarshalCBOR(r io.Reader) (err error) { + *t = QueryParams0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.PieceCID (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } + + t.PieceCID = &c + } + + } + return nil +} + +var lengthBufDealPayment0 = []byte{131} + +func (t *DealPayment0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufDealPayment0); err != nil { + return err + } + + // t.ID (legacyretrievaltypes.DealID) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ID)); err != nil { + return err + } + + // t.PaymentChannel (address.Address) (struct) + if err := t.PaymentChannel.MarshalCBOR(cw); err != nil { + return err + } + + // t.PaymentVoucher (paych.SignedVoucher) (struct) + if err := t.PaymentVoucher.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *DealPayment0) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealPayment0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.ID (legacyretrievaltypes.DealID) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.ID = legacyretrievaltypes.DealID(extra) + + } + // t.PaymentChannel (address.Address) (struct) + + { + + if err := t.PaymentChannel.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentChannel: %w", err) + } + + } + // t.PaymentVoucher (paych.SignedVoucher) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.PaymentVoucher = new(paych.SignedVoucher) + if err := t.PaymentVoucher.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentVoucher pointer: %w", err) + } + } + + } + return nil +} + +var lengthBufClientDealState0 = []byte{148} + +func (t *ClientDealState0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufClientDealState0); err != nil { + return err + } + + // t.DealProposal0 (migrations.DealProposal0) (struct) + if err := t.DealProposal0.MarshalCBOR(cw); err != nil { + return err + } + + // t.StoreID (uint64) (uint64) + + if t.StoreID == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(*t.StoreID)); err != nil { + 
return err + } + } + + // t.ChannelID (datatransfer.ChannelID) (struct) + if err := t.ChannelID.MarshalCBOR(cw); err != nil { + return err + } + + // t.LastPaymentRequested (bool) (bool) + if err := cbg.WriteBool(w, t.LastPaymentRequested); err != nil { + return err + } + + // t.AllBlocksReceived (bool) (bool) + if err := cbg.WriteBool(w, t.AllBlocksReceived); err != nil { + return err + } + + // t.TotalFunds (big.Int) (struct) + if err := t.TotalFunds.MarshalCBOR(cw); err != nil { + return err + } + + // t.ClientWallet (address.Address) (struct) + if err := t.ClientWallet.MarshalCBOR(cw); err != nil { + return err + } + + // t.MinerWallet (address.Address) (struct) + if err := t.MinerWallet.MarshalCBOR(cw); err != nil { + return err + } + + // t.PaymentInfo (migrations.PaymentInfo0) (struct) + if err := t.PaymentInfo.MarshalCBOR(cw); err != nil { + return err + } + + // t.Status (legacyretrievaltypes.DealStatus) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.Sender (peer.ID) (string) + if len(t.Sender) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Sender was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Sender))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Sender)); err != nil { + return err + } + + // t.TotalReceived (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TotalReceived)); err != nil { + return err + } + + // t.Message (string) (string) + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Message)); err != nil { + return err + } + + // t.BytesPaidFor (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.BytesPaidFor)); err != nil { + return err + } + + // t.CurrentInterval (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.CurrentInterval)); err != nil { + return err + } + + // t.PaymentRequested (big.Int) (struct) + if err := t.PaymentRequested.MarshalCBOR(cw); err != nil { + return err + } + + // t.FundsSpent (big.Int) (struct) + if err := t.FundsSpent.MarshalCBOR(cw); err != nil { + return err + } + + // t.UnsealFundsPaid (big.Int) (struct) + if err := t.UnsealFundsPaid.MarshalCBOR(cw); err != nil { + return err + } + + // t.WaitMsgCID (cid.Cid) (struct) + + if t.WaitMsgCID == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.WaitMsgCID); err != nil { + return xerrors.Errorf("failed to write cid field t.WaitMsgCID: %w", err) + } + } + + // t.VoucherShortfall (big.Int) (struct) + if err := t.VoucherShortfall.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *ClientDealState0) UnmarshalCBOR(r io.Reader) (err error) { + *t = ClientDealState0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 20 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.DealProposal0 (migrations.DealProposal0) (struct) + + { + + if err := t.DealProposal0.UnmarshalCBOR(cr); err != nil { + return 
xerrors.Errorf("unmarshaling t.DealProposal0: %w", err) + } + + } + // t.StoreID (uint64) (uint64) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + typed := uint64(extra) + t.StoreID = &typed + } + + } + // t.ChannelID (datatransfer.ChannelID) (struct) + + { + + if err := t.ChannelID.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ChannelID: %w", err) + } + + } + // t.LastPaymentRequested (bool) (bool) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.LastPaymentRequested = false + case 21: + t.LastPaymentRequested = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.AllBlocksReceived (bool) (bool) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.AllBlocksReceived = false + case 21: + t.AllBlocksReceived = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.TotalFunds (big.Int) (struct) + + { + + if err := t.TotalFunds.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.TotalFunds: %w", err) + } + + } + // t.ClientWallet (address.Address) (struct) + + { + + if err := t.ClientWallet.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ClientWallet: %w", err) + } + + } + // t.MinerWallet (address.Address) (struct) + + { + + if err := t.MinerWallet.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.MinerWallet: %w", err) + } + + } + // t.PaymentInfo (migrations.PaymentInfo0) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.PaymentInfo = new(PaymentInfo0) + if err := t.PaymentInfo.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentInfo pointer: %w", err) + } + } + + } + // t.Status (legacyretrievaltypes.DealStatus) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = legacyretrievaltypes.DealStatus(extra) + + } + // t.Sender (peer.ID) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Sender = peer.ID(sval) + } + // t.TotalReceived (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.TotalReceived = uint64(extra) + + } + // t.Message (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.BytesPaidFor (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.BytesPaidFor = uint64(extra) + + } + // t.CurrentInterval (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + 
return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.CurrentInterval = uint64(extra) + + } + // t.PaymentRequested (big.Int) (struct) + + { + + if err := t.PaymentRequested.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentRequested: %w", err) + } + + } + // t.FundsSpent (big.Int) (struct) + + { + + if err := t.FundsSpent.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.FundsSpent: %w", err) + } + + } + // t.UnsealFundsPaid (big.Int) (struct) + + { + + if err := t.UnsealFundsPaid.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.UnsealFundsPaid: %w", err) + } + + } + // t.WaitMsgCID (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.WaitMsgCID: %w", err) + } + + t.WaitMsgCID = &c + } + + } + // t.VoucherShortfall (big.Int) (struct) + + { + + if err := t.VoucherShortfall.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.VoucherShortfall: %w", err) + } + + } + return nil +} + +var lengthBufProviderDealState0 = []byte{138} + +func (t *ProviderDealState0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufProviderDealState0); err != nil { + return err + } + + // t.DealProposal0 (migrations.DealProposal0) (struct) + if err := t.DealProposal0.MarshalCBOR(cw); err != nil { + return err + } + + // t.StoreID (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StoreID)); err != nil { + return err + } + + // t.ChannelID (datatransfer.ChannelID) (struct) + if err := t.ChannelID.MarshalCBOR(cw); err != nil { + return err + } + + // t.PieceInfo (migrations.PieceInfo0) (struct) + if err := t.PieceInfo.MarshalCBOR(cw); err != nil { + return err + } + + // t.Status (legacyretrievaltypes.DealStatus) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.Receiver (peer.ID) (string) + if len(t.Receiver) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Receiver was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Receiver))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Receiver)); err != nil { + return err + } + + // t.TotalSent (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TotalSent)); err != nil { + return err + } + + // t.FundsReceived (big.Int) (struct) + if err := t.FundsReceived.MarshalCBOR(cw); err != nil { + return err + } + + // t.Message (string) (string) + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Message)); err != nil { + return err + } + + // t.CurrentInterval (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.CurrentInterval)); err != nil { + return err + } + + return nil +} + +func (t *ProviderDealState0) UnmarshalCBOR(r io.Reader) (err error) { + *t = ProviderDealState0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + 
return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 10 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.DealProposal0 (migrations.DealProposal0) (struct) + + { + + if err := t.DealProposal0.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealProposal0: %w", err) + } + + } + // t.StoreID (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.StoreID = uint64(extra) + + } + // t.ChannelID (datatransfer.ChannelID) (struct) + + { + + if err := t.ChannelID.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ChannelID: %w", err) + } + + } + // t.PieceInfo (migrations.PieceInfo0) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.PieceInfo = new(migrations.PieceInfo0) + if err := t.PieceInfo.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PieceInfo pointer: %w", err) + } + } + + } + // t.Status (legacyretrievaltypes.DealStatus) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = legacyretrievaltypes.DealStatus(extra) + + } + // t.Receiver (peer.ID) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Receiver = peer.ID(sval) + } + // t.TotalSent (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.TotalSent = uint64(extra) + + } + // t.FundsReceived (big.Int) (struct) + + { + + if err := t.FundsReceived.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.FundsReceived: %w", err) + } + + } + // t.Message (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.CurrentInterval (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.CurrentInterval = uint64(extra) + + } + return nil +} + +var lengthBufPaymentInfo0 = []byte{130} + +func (t *PaymentInfo0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufPaymentInfo0); err != nil { + return err + } + + // t.PayCh (address.Address) (struct) + if err := t.PayCh.MarshalCBOR(cw); err != nil { + return err + } + + // t.Lane (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Lane)); err != nil { + return err + } + + return nil +} + +func (t *PaymentInfo0) UnmarshalCBOR(r io.Reader) (err error) { + *t = PaymentInfo0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.PayCh 
(address.Address) (struct) + + { + + if err := t.PayCh.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PayCh: %w", err) + } + + } + // t.Lane (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Lane = uint64(extra) + + } + return nil +} + +var lengthBufRetrievalPeer0 = []byte{131} + +func (t *RetrievalPeer0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufRetrievalPeer0); err != nil { + return err + } + + // t.Address (address.Address) (struct) + if err := t.Address.MarshalCBOR(cw); err != nil { + return err + } + + // t.ID (peer.ID) (string) + if len(t.ID) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.ID was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.ID))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.ID)); err != nil { + return err + } + + // t.PieceCID (cid.Cid) (struct) + + if t.PieceCID == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PieceCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) + } + } + + return nil +} + +func (t *RetrievalPeer0) UnmarshalCBOR(r io.Reader) (err error) { + *t = RetrievalPeer0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Address (address.Address) (struct) + + { + + if err := t.Address.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Address: %w", err) + } + + } + // t.ID (peer.ID) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.ID = peer.ID(sval) + } + // t.PieceCID (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } + + t.PieceCID = &c + } + + } + return nil +} + +var lengthBufAsk0 = []byte{132} + +func (t *Ask0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufAsk0); err != nil { + return err + } + + // t.PricePerByte (big.Int) (struct) + if err := t.PricePerByte.MarshalCBOR(cw); err != nil { + return err + } + + // t.UnsealPrice (big.Int) (struct) + if err := t.UnsealPrice.MarshalCBOR(cw); err != nil { + return err + } + + // t.PaymentInterval (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PaymentInterval)); err != nil { + return err + } + + // t.PaymentIntervalIncrease (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PaymentIntervalIncrease)); err != nil { + return err + } + + return nil +} + +func (t *Ask0) UnmarshalCBOR(r io.Reader) (err error) { + *t = Ask0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() 
{ + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 4 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.PricePerByte (big.Int) (struct) + + { + + if err := t.PricePerByte.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PricePerByte: %w", err) + } + + } + // t.UnsealPrice (big.Int) (struct) + + { + + if err := t.UnsealPrice.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.UnsealPrice: %w", err) + } + + } + // t.PaymentInterval (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PaymentInterval = uint64(extra) + + } + // t.PaymentIntervalIncrease (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PaymentIntervalIncrease = uint64(extra) + + } + return nil +} diff --git a/retrievalmarket/types/legacyretrievaltypes/types.go b/retrievalmarket/types/legacyretrievaltypes/types.go new file mode 100644 index 000000000..fe9f00bc1 --- /dev/null +++ b/retrievalmarket/types/legacyretrievaltypes/types.go @@ -0,0 +1,509 @@ +package legacyretrievaltypes + +import ( + "bytes" + "errors" + "fmt" + + "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/markets/piecestore" + "github.com/filecoin-project/boost/markets/shared" + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/net/context" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych" +) + +//go:generate cbor-gen-for --map-encoding Query QueryResponse DealProposal DealResponse Params QueryParams DealPayment ClientDealState ProviderDealState PaymentInfo RetrievalPeer Ask + +// QueryProtocolID is the protocol for querying information about retrieval +// deal parameters +const QueryProtocolID = protocol.ID("/fil/retrieval/qry/1.0.0") + +// OldQueryProtocolID is the old query protocol for tuple structs +const OldQueryProtocolID = protocol.ID("/fil/retrieval/qry/0.0.1") + +// Unsubscribe is a function that unsubscribes a subscriber for either the +// client or the provider +type Unsubscribe func() + +// PaymentInfo is the payment channel and lane for a deal, once it is setup +type PaymentInfo struct { + PayCh address.Address + Lane uint64 +} + +// ClientDealState is the current state of a deal from the point of view +// of a retrieval client +type ClientDealState struct { + DealProposal + StoreID *uint64 + // Set when the data transfer is started + ChannelID *datatransfer.ChannelID + LastPaymentRequested bool + AllBlocksReceived bool + TotalFunds abi.TokenAmount + ClientWallet address.Address + MinerWallet address.Address + PaymentInfo *PaymentInfo + Status DealStatus + Sender peer.ID + TotalReceived uint64 + Message string + BytesPaidFor uint64 + CurrentInterval uint64 + PaymentRequested abi.TokenAmount + FundsSpent abi.TokenAmount + UnsealFundsPaid abi.TokenAmount + WaitMsgCID 
*cid.Cid // the CID of any message the client deal is waiting for + VoucherShortfall abi.TokenAmount + LegacyProtocol bool +} + +func (deal *ClientDealState) NextInterval() uint64 { + return deal.Params.NextInterval(deal.CurrentInterval) +} + +type ProviderQueryEvent struct { + Response QueryResponse + Error error +} + +type ProviderValidationEvent struct { + IsRestart bool + Receiver peer.ID + Proposal *DealProposal + BaseCid cid.Cid + Selector ipld.Node + Response *DealResponse + Error error +} + +// ProviderDealState is the current state of a deal from the point of view +// of a retrieval provider +type ProviderDealState struct { + DealProposal + StoreID uint64 + + ChannelID *datatransfer.ChannelID + PieceInfo *piecestore.PieceInfo + Status DealStatus + Receiver peer.ID + TotalSent uint64 + FundsReceived abi.TokenAmount + Message string + CurrentInterval uint64 + LegacyProtocol bool +} + +func (deal *ProviderDealState) IntervalLowerBound() uint64 { + return deal.Params.IntervalLowerBound(deal.CurrentInterval) +} + +func (deal *ProviderDealState) NextInterval() uint64 { + return deal.Params.NextInterval(deal.CurrentInterval) +} + +// Identifier provides a unique id for this provider deal +func (pds ProviderDealState) Identifier() ProviderDealIdentifier { + return ProviderDealIdentifier{Receiver: pds.Receiver, DealID: pds.ID} +} + +// ProviderDealIdentifier is a value that uniquely identifies a deal +type ProviderDealIdentifier struct { + Receiver peer.ID + DealID DealID +} + +func (p ProviderDealIdentifier) String() string { + return fmt.Sprintf("%v/%v", p.Receiver, p.DealID) +} + +// RetrievalPeer is a provider address/peer.ID pair (everything needed to make +// deals for with a miner) +type RetrievalPeer struct { + Address address.Address + ID peer.ID // optional + PieceCID *cid.Cid +} + +// QueryResponseStatus indicates whether a queried piece is available +type QueryResponseStatus uint64 + +const ( + // QueryResponseAvailable indicates a provider has a piece and is prepared to + // return it + QueryResponseAvailable QueryResponseStatus = iota + + // QueryResponseUnavailable indicates a provider either does not have or cannot + // serve the queried piece to the client + QueryResponseUnavailable + + // QueryResponseError indicates something went wrong generating a query response + QueryResponseError +) + +// QueryItemStatus (V1) indicates whether the requested part of a piece (payload or selector) +// is available for retrieval +type QueryItemStatus uint64 + +const ( + // QueryItemAvailable indicates requested part of the piece is available to be + // served + QueryItemAvailable QueryItemStatus = iota + + // QueryItemUnavailable indicates the piece either does not contain the requested + // item or it cannot be served + QueryItemUnavailable + + // QueryItemUnknown indicates the provider cannot determine if the given item + // is part of the requested piece (for example, if the piece is sealed and the + // miner does not maintain a payload CID index) + QueryItemUnknown +) + +// QueryParams - V1 - indicate what specific information about a piece that a retrieval +// client is interested in, as well as specific parameters the client is seeking +// for the retrieval deal +type QueryParams struct { + PieceCID *cid.Cid // optional, query if miner has this cid in this piece. some miners may not be able to respond. + //Selector ipld.Node // optional, query if miner has this cid in this piece. some miners may not be able to respond. 
+ //MaxPricePerByte abi.TokenAmount // optional, tell miner uninterested if more expensive than this + //MinPaymentInterval uint64 // optional, tell miner uninterested unless payment interval is greater than this + //MinPaymentIntervalIncrease uint64 // optional, tell miner uninterested unless payment interval increase is greater than this +} + +// Query is a query to a given provider to determine information about a piece +// they may have available for retrieval +type Query struct { + PayloadCID cid.Cid // V0 + QueryParams // V1 +} + +// QueryUndefined is a query with no values +var QueryUndefined = Query{} + +// NewQueryV0 creates a V0 query (which only specifies a payload) +func NewQueryV0(payloadCID cid.Cid) Query { + return Query{PayloadCID: payloadCID} +} + +// NewQueryV1 creates a V1 query (which has an optional pieceCID) +func NewQueryV1(payloadCID cid.Cid, pieceCID *cid.Cid) Query { + return Query{ + PayloadCID: payloadCID, + QueryParams: QueryParams{ + PieceCID: pieceCID, + }, + } +} + +// QueryResponse is a miners response to a given retrieval query +type QueryResponse struct { + Status QueryResponseStatus + PieceCIDFound QueryItemStatus // V1 - if a PieceCID was requested, the result + //SelectorFound QueryItemStatus // V1 - if a Selector was requested, the result + + Size uint64 // Total size of piece in bytes + //ExpectedPayloadSize uint64 // V1 - optional, if PayloadCID + selector are specified and miner knows, can offer an expected size + + PaymentAddress address.Address // address to send funds to -- may be different than miner addr + MinPricePerByte abi.TokenAmount + MaxPaymentInterval uint64 + MaxPaymentIntervalIncrease uint64 + Message string + UnsealPrice abi.TokenAmount +} + +// QueryResponseUndefined is an empty QueryResponse +var QueryResponseUndefined = QueryResponse{} + +// PieceRetrievalPrice is the total price to retrieve the piece (size * MinPricePerByte + UnsealedPrice) +func (qr QueryResponse) PieceRetrievalPrice() abi.TokenAmount { + return big.Add(big.Mul(qr.MinPricePerByte, abi.NewTokenAmount(int64(qr.Size))), qr.UnsealPrice) +} + +// PayloadRetrievalPrice is the expected price to retrieve just the given payload +// & selector (V1) +//func (qr QueryResponse) PayloadRetrievalPrice() abi.TokenAmount { +// return types.BigMul(qr.MinPricePerByte, types.NewInt(qr.ExpectedPayloadSize)) +//} + +// IsTerminalError returns true if this status indicates processing of this deal +// is complete with an error +func IsTerminalError(status DealStatus) bool { + return status == DealStatusDealNotFound || + status == DealStatusFailing || + status == DealStatusRejected +} + +// IsTerminalSuccess returns true if this status indicates processing of this deal +// is complete with a success +func IsTerminalSuccess(status DealStatus) bool { + return status == DealStatusCompleted +} + +// IsTerminalStatus returns true if this status indicates processing of a deal is +// complete (either success or error) +func IsTerminalStatus(status DealStatus) bool { + return IsTerminalError(status) || IsTerminalSuccess(status) +} + +// Params are the parameters requested for a retrieval deal proposal +type Params struct { + Selector *cbg.Deferred // V1 + PieceCID *cid.Cid + PricePerByte abi.TokenAmount + PaymentInterval uint64 // when to request payment + PaymentIntervalIncrease uint64 + UnsealPrice abi.TokenAmount +} + +func (p Params) SelectorSpecified() bool { + return p.Selector != nil && !bytes.Equal(p.Selector.Raw, cbg.CborNull) +} + +func (p Params) IntervalLowerBound(currentInterval 
uint64) uint64 { + intervalSize := p.PaymentInterval + var lowerBound uint64 + var target uint64 + for target < currentInterval { + lowerBound = target + target += intervalSize + intervalSize += p.PaymentIntervalIncrease + } + return lowerBound +} + +func (p Params) NextInterval(currentInterval uint64) uint64 { + intervalSize := p.PaymentInterval + var nextInterval uint64 + for nextInterval <= currentInterval { + nextInterval += intervalSize + intervalSize += p.PaymentIntervalIncrease + } + return nextInterval +} + +// NewParamsV0 generates parameters for a retrieval deal, which is always a whole piece deal +func NewParamsV0(pricePerByte abi.TokenAmount, paymentInterval uint64, paymentIntervalIncrease uint64) Params { + return Params{ + PricePerByte: pricePerByte, + PaymentInterval: paymentInterval, + PaymentIntervalIncrease: paymentIntervalIncrease, + UnsealPrice: big.Zero(), + } +} + +// NewParamsV1 generates parameters for a retrieval deal, including a selector +func NewParamsV1(pricePerByte abi.TokenAmount, paymentInterval uint64, paymentIntervalIncrease uint64, sel ipld.Node, pieceCid *cid.Cid, unsealPrice abi.TokenAmount) (Params, error) { + var buffer bytes.Buffer + + if sel == nil { + return Params{}, xerrors.New("selector required for NewParamsV1") + } + + err := dagcbor.Encode(sel, &buffer) + if err != nil { + return Params{}, xerrors.Errorf("error encoding selector: %w", err) + } + + return Params{ + Selector: &cbg.Deferred{Raw: buffer.Bytes()}, + PieceCID: pieceCid, + PricePerByte: pricePerByte, + PaymentInterval: paymentInterval, + PaymentIntervalIncrease: paymentIntervalIncrease, + UnsealPrice: unsealPrice, + }, nil +} + +// DealID is an identifier for a retrieval deal (unique to a client) +type DealID uint64 + +func (d DealID) String() string { + return fmt.Sprintf("%d", d) +} + +// DealProposal is a proposal for a new retrieval deal +type DealProposal struct { + PayloadCID cid.Cid + ID DealID + Params +} + +// Type method makes DealProposal usable as a voucher +func (dp *DealProposal) Type() datatransfer.TypeIdentifier { + return "RetrievalDealProposal/1" +} + +// DealProposalUndefined is an undefined deal proposal +var DealProposalUndefined = DealProposal{} + +// DealResponse is a response to a retrieval deal proposal +type DealResponse struct { + Status DealStatus + ID DealID + + // payment required to proceed + PaymentOwed abi.TokenAmount + + Message string +} + +// Type method makes DealResponse usable as a voucher result +func (dr *DealResponse) Type() datatransfer.TypeIdentifier { + return "RetrievalDealResponse/1" +} + +// DealResponseUndefined is an undefined deal response +var DealResponseUndefined = DealResponse{} + +// DealPayment is a payment for an in progress retrieval deal +type DealPayment struct { + ID DealID + PaymentChannel address.Address + PaymentVoucher *paychtypes.SignedVoucher +} + +// Type method makes DealPayment usable as a voucher +func (dr *DealPayment) Type() datatransfer.TypeIdentifier { + return "RetrievalDealPayment/1" +} + +// DealPaymentUndefined is an undefined deal payment +var DealPaymentUndefined = DealPayment{} + +var ( + // ErrNotFound means a piece was not found during retrieval + ErrNotFound = errors.New("not found") + + // ErrVerification means a retrieval contained a block response that did not verify + ErrVerification = errors.New("Error when verify data") +) + +type Ask struct { + PricePerByte abi.TokenAmount + UnsealPrice abi.TokenAmount + PaymentInterval uint64 + PaymentIntervalIncrease uint64 +} + +// ShortfallErorr is an 
error that indicates a shortfall of funds
+type ShortfallError struct {
+	shortfall abi.TokenAmount
+}
+
+// NewShortfallError returns a new error indicating a shortfall of funds
+func NewShortfallError(shortfall abi.TokenAmount) error {
+	return ShortfallError{shortfall}
+}
+
+// Shortfall returns the numerical value of the shortfall
+func (se ShortfallError) Shortfall() abi.TokenAmount {
+	return se.shortfall
+}
+func (se ShortfallError) Error() string {
+	return fmt.Sprintf("Insufficient Funds. Shortfall: %s", se.shortfall.String())
+}
+
+// ChannelAvailableFunds provides information about funds in a channel
+type ChannelAvailableFunds struct {
+	// ConfirmedAmt is the amount of funds that have been confirmed on-chain
+	// for the channel
+	ConfirmedAmt abi.TokenAmount
+	// PendingAmt is the amount of funds that are pending confirmation on-chain
+	PendingAmt abi.TokenAmount
+	// PendingWaitSentinel can be used with PaychGetWaitReady to wait for
+	// confirmation of pending funds
+	PendingWaitSentinel *cid.Cid
+	// QueuedAmt is the amount that is queued up behind a pending request
+	QueuedAmt abi.TokenAmount
+	// VoucherReedeemedAmt is the amount that is redeemed by vouchers on-chain
+	// and in the local datastore
+	VoucherReedeemedAmt abi.TokenAmount
+}
+
+// PricingInput provides input parameters required to price a retrieval deal.
+type PricingInput struct {
+	// PayloadCID is the cid of the payload to retrieve.
+	PayloadCID cid.Cid
+	// PieceCID is the cid of the Piece from which the Payload will be retrieved.
+	PieceCID cid.Cid
+	// PieceSize is the size of the Piece from which the payload will be retrieved.
+	PieceSize abi.UnpaddedPieceSize
+	// Client is the peerID of the retrieval client.
+	Client peer.ID
+	// VerifiedDeal is true if there exists a verified storage deal for the PayloadCID.
+	VerifiedDeal bool
+	// Unsealed is true if there exists an unsealed sector from which we can retrieve the given payload.
+	Unsealed bool
+	// CurrentAsk is the current configured ask in the ask-store.
+	CurrentAsk Ask
+}
+
+// RetrievalClient is a client interface for making retrieval deals
+type RetrievalClient interface {
+
+	// NextID generates a new deal ID.
+	NextID() DealID
+
+	// Start initializes the client by running migrations
+	Start(ctx context.Context) error
+
+	// OnReady registers a listener for when the client comes online
+	OnReady(shared.ReadyFunc)
+
+	// FindProviders finds retrieval providers who may be storing a given piece
+	FindProviders(payloadCID cid.Cid) []RetrievalPeer
+
+	// Query asks a provider for information about a piece it is storing
+	Query(
+		ctx context.Context,
+		p RetrievalPeer,
+		payloadCID cid.Cid,
+		params QueryParams,
+	) (QueryResponse, error)
+
+	// Retrieve retrieves all or part of a piece with the given retrieval parameters
+	Retrieve(
+		ctx context.Context,
+		id DealID,
+		payloadCID cid.Cid,
+		params Params,
+		totalFunds abi.TokenAmount,
+		p RetrievalPeer,
+		clientWallet address.Address,
+		minerWallet address.Address,
+	) (DealID, error)
+
+	// SubscribeToEvents listens for events related to client retrievals
+	SubscribeToEvents(subscriber ClientSubscriber) Unsubscribe
+
+	// V1
+
+	// TryRestartInsufficientFunds attempts to restart any deals stuck in the insufficient funds state
+	// after funds are added to a given payment channel
+	TryRestartInsufficientFunds(paymentChannel address.Address) error
+
+	// CancelDeal attempts to cancel an in-progress deal
+	CancelDeal(id DealID) error
+
+	// GetDeal returns a given deal by deal ID, if it exists
+	GetDeal(dealID DealID) (ClientDealState, error)
+
+	// ListDeals returns all deals
+	ListDeals() (map[DealID]ClientDealState, error)
+}
+
+// ClientSubscriber is a callback that is registered to listen for retrieval events
+type ClientSubscriber func(event ClientEvent, state ClientDealState)
diff --git a/retrievalmarket/types/legacyretrievaltypes/types_cbor_gen.go b/retrievalmarket/types/legacyretrievaltypes/types_cbor_gen.go
new file mode 100644
index 000000000..64d1e4acc
--- /dev/null
+++ b/retrievalmarket/types/legacyretrievaltypes/types_cbor_gen.go
@@ -0,0 +1,2909 @@
+// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT.
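Quick illustration of the hand-written types above (an editor's sketch, not part of this patch; it assumes the legacyretrievaltypes package exactly as added here). It shows how the payment-interval schedule in Params grows by PaymentIntervalIncrease after each interval, and how QueryResponse.PieceRetrievalPrice combines Size, MinPricePerByte and UnsealPrice:

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes"
)

func main() {
	// PaymentInterval=1000 bytes, PaymentIntervalIncrease=500 bytes:
	// the provider requests payment after 1000, 2500, 4500, 7000, ... bytes.
	p := legacyretrievaltypes.NewParamsV0(abi.NewTokenAmount(2), 1000, 500)
	for _, received := range []uint64{0, 1000, 2500, 3000} {
		fmt.Printf("received=%d lowerBound=%d nextInterval=%d\n",
			received, p.IntervalLowerBound(received), p.NextInterval(received))
	}
	// received=0    lowerBound=0    nextInterval=1000
	// received=1000 lowerBound=0    nextInterval=2500
	// received=2500 lowerBound=1000 nextInterval=4500
	// received=3000 lowerBound=2500 nextInterval=4500

	// PieceRetrievalPrice = Size*MinPricePerByte + UnsealPrice.
	qr := legacyretrievaltypes.QueryResponse{
		Status:          legacyretrievaltypes.QueryResponseAvailable,
		Size:            1 << 20, // 1 MiB
		MinPricePerByte: abi.NewTokenAmount(2),
		UnsealPrice:     abi.NewTokenAmount(1000),
	}
	fmt.Println("piece retrieval price:", qr.PieceRetrievalPrice()) // 2*1048576 + 1000 = 2098152
}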
+ +package legacyretrievaltypes + +import ( + "fmt" + "io" + "math" + "sort" + + "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/markets/piecestore" + "github.com/filecoin-project/go-state-types/builtin/v8/paych" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +func (t *Query) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.PayloadCID (cid.Cid) (struct) + if len("PayloadCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PayloadCID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PayloadCID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PayloadCID")); err != nil { + return err + } + + if err := cbg.WriteCid(cw, t.PayloadCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PayloadCID: %w", err) + } + + // t.QueryParams (legacyretrievaltypes.QueryParams) (struct) + if len("QueryParams") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"QueryParams\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("QueryParams"))); err != nil { + return err + } + if _, err := cw.WriteString(string("QueryParams")); err != nil { + return err + } + + if err := t.QueryParams.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *Query) UnmarshalCBOR(r io.Reader) (err error) { + *t = Query{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("Query: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.PayloadCID (cid.Cid) (struct) + case "PayloadCID": + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PayloadCID: %w", err) + } + + t.PayloadCID = c + + } + // t.QueryParams (legacyretrievaltypes.QueryParams) (struct) + case "QueryParams": + + { + + if err := t.QueryParams.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.QueryParams: %w", err) + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *QueryResponse) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{169}); err != nil { + return err + } + + // t.Size (uint64) (uint64) + if len("Size") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Size\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Size"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Size")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Size)); err != nil { + return err + } + + // t.Status 
(legacyretrievaltypes.QueryResponseStatus) (uint64) + if len("Status") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Status\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Status"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Status")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Message)); err != nil { + return err + } + + // t.UnsealPrice (big.Int) (struct) + if len("UnsealPrice") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"UnsealPrice\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("UnsealPrice"))); err != nil { + return err + } + if _, err := cw.WriteString(string("UnsealPrice")); err != nil { + return err + } + + if err := t.UnsealPrice.MarshalCBOR(cw); err != nil { + return err + } + + // t.PieceCIDFound (legacyretrievaltypes.QueryItemStatus) (uint64) + if len("PieceCIDFound") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceCIDFound\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PieceCIDFound"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PieceCIDFound")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PieceCIDFound)); err != nil { + return err + } + + // t.PaymentAddress (address.Address) (struct) + if len("PaymentAddress") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentAddress\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PaymentAddress"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PaymentAddress")); err != nil { + return err + } + + if err := t.PaymentAddress.MarshalCBOR(cw); err != nil { + return err + } + + // t.MinPricePerByte (big.Int) (struct) + if len("MinPricePerByte") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MinPricePerByte\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("MinPricePerByte"))); err != nil { + return err + } + if _, err := cw.WriteString(string("MinPricePerByte")); err != nil { + return err + } + + if err := t.MinPricePerByte.MarshalCBOR(cw); err != nil { + return err + } + + // t.MaxPaymentInterval (uint64) (uint64) + if len("MaxPaymentInterval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MaxPaymentInterval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("MaxPaymentInterval"))); err != nil { + return err + } + if _, err := cw.WriteString(string("MaxPaymentInterval")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.MaxPaymentInterval)); err != nil { + return err + } + + // t.MaxPaymentIntervalIncrease (uint64) (uint64) + if len("MaxPaymentIntervalIncrease") > 
cbg.MaxLength { + return xerrors.Errorf("Value in field \"MaxPaymentIntervalIncrease\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("MaxPaymentIntervalIncrease"))); err != nil { + return err + } + if _, err := cw.WriteString(string("MaxPaymentIntervalIncrease")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.MaxPaymentIntervalIncrease)); err != nil { + return err + } + + return nil +} + +func (t *QueryResponse) UnmarshalCBOR(r io.Reader) (err error) { + *t = QueryResponse{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("QueryResponse: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Size (uint64) (uint64) + case "Size": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Size = uint64(extra) + + } + // t.Status (legacyretrievaltypes.QueryResponseStatus) (uint64) + case "Status": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = QueryResponseStatus(extra) + + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.UnsealPrice (big.Int) (struct) + case "UnsealPrice": + + { + + if err := t.UnsealPrice.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.UnsealPrice: %w", err) + } + + } + // t.PieceCIDFound (legacyretrievaltypes.QueryItemStatus) (uint64) + case "PieceCIDFound": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PieceCIDFound = QueryItemStatus(extra) + + } + // t.PaymentAddress (address.Address) (struct) + case "PaymentAddress": + + { + + if err := t.PaymentAddress.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentAddress: %w", err) + } + + } + // t.MinPricePerByte (big.Int) (struct) + case "MinPricePerByte": + + { + + if err := t.MinPricePerByte.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.MinPricePerByte: %w", err) + } + + } + // t.MaxPaymentInterval (uint64) (uint64) + case "MaxPaymentInterval": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.MaxPaymentInterval = uint64(extra) + + } + // t.MaxPaymentIntervalIncrease (uint64) (uint64) + case "MaxPaymentIntervalIncrease": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.MaxPaymentIntervalIncrease = uint64(extra) + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DealProposal) MarshalCBOR(w io.Writer) 
error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{163}); err != nil { + return err + } + + // t.ID (legacyretrievaltypes.DealID) (uint64) + if len("ID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("ID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ID)); err != nil { + return err + } + + // t.Params (legacyretrievaltypes.Params) (struct) + if len("Params") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Params\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Params"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Params")); err != nil { + return err + } + + if err := t.Params.MarshalCBOR(cw); err != nil { + return err + } + + // t.PayloadCID (cid.Cid) (struct) + if len("PayloadCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PayloadCID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PayloadCID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PayloadCID")); err != nil { + return err + } + + if err := cbg.WriteCid(cw, t.PayloadCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PayloadCID: %w", err) + } + + return nil +} + +func (t *DealProposal) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealProposal{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealProposal: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.ID (legacyretrievaltypes.DealID) (uint64) + case "ID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.ID = DealID(extra) + + } + // t.Params (legacyretrievaltypes.Params) (struct) + case "Params": + + { + + if err := t.Params.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Params: %w", err) + } + + } + // t.PayloadCID (cid.Cid) (struct) + case "PayloadCID": + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PayloadCID: %w", err) + } + + t.PayloadCID = c + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DealResponse) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{164}); err != nil { + return err + } + + // t.ID (legacyretrievaltypes.DealID) (uint64) + if len("ID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("ID")); err != nil { + return err + 
} + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ID)); err != nil { + return err + } + + // t.Status (legacyretrievaltypes.DealStatus) (uint64) + if len("Status") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Status\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Status"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Status")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Message)); err != nil { + return err + } + + // t.PaymentOwed (big.Int) (struct) + if len("PaymentOwed") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentOwed\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PaymentOwed"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PaymentOwed")); err != nil { + return err + } + + if err := t.PaymentOwed.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *DealResponse) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealResponse{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealResponse: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.ID (legacyretrievaltypes.DealID) (uint64) + case "ID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.ID = DealID(extra) + + } + // t.Status (legacyretrievaltypes.DealStatus) (uint64) + case "Status": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = DealStatus(extra) + + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.PaymentOwed (big.Int) (struct) + case "PaymentOwed": + + { + + if err := t.PaymentOwed.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentOwed: %w", err) + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *Params) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{166}); err != nil { + return err + } + + 
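	// Note: 166 is 0xa6, the CBOR header for a map of 6 entries. These types are generated
	// with cbor-gen's --map-encoding option (see the go:generate directive in types.go), so
	// each of the 6 fields below is written as a text-string key followed by its value, and
	// the matching UnmarshalCBOR ignores any keys it does not recognize.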
// t.PieceCID (cid.Cid) (struct) + if len("PieceCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceCID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PieceCID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PieceCID")); err != nil { + return err + } + + if t.PieceCID == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PieceCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) + } + } + + // t.Selector (typegen.Deferred) (struct) + if len("Selector") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Selector\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Selector"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Selector")); err != nil { + return err + } + + if err := t.Selector.MarshalCBOR(cw); err != nil { + return err + } + + // t.UnsealPrice (big.Int) (struct) + if len("UnsealPrice") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"UnsealPrice\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("UnsealPrice"))); err != nil { + return err + } + if _, err := cw.WriteString(string("UnsealPrice")); err != nil { + return err + } + + if err := t.UnsealPrice.MarshalCBOR(cw); err != nil { + return err + } + + // t.PricePerByte (big.Int) (struct) + if len("PricePerByte") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PricePerByte\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PricePerByte"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PricePerByte")); err != nil { + return err + } + + if err := t.PricePerByte.MarshalCBOR(cw); err != nil { + return err + } + + // t.PaymentInterval (uint64) (uint64) + if len("PaymentInterval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentInterval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PaymentInterval"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PaymentInterval")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PaymentInterval)); err != nil { + return err + } + + // t.PaymentIntervalIncrease (uint64) (uint64) + if len("PaymentIntervalIncrease") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentIntervalIncrease\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PaymentIntervalIncrease"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PaymentIntervalIncrease")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PaymentIntervalIncrease)); err != nil { + return err + } + + return nil +} + +func (t *Params) UnmarshalCBOR(r io.Reader) (err error) { + *t = Params{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("Params: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch 
name { + // t.PieceCID (cid.Cid) (struct) + case "PieceCID": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } + + t.PieceCID = &c + } + + } + // t.Selector (typegen.Deferred) (struct) + case "Selector": + + { + + t.Selector = new(cbg.Deferred) + + if err := t.Selector.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("failed to read deferred field: %w", err) + } + } + // t.UnsealPrice (big.Int) (struct) + case "UnsealPrice": + + { + + if err := t.UnsealPrice.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.UnsealPrice: %w", err) + } + + } + // t.PricePerByte (big.Int) (struct) + case "PricePerByte": + + { + + if err := t.PricePerByte.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PricePerByte: %w", err) + } + + } + // t.PaymentInterval (uint64) (uint64) + case "PaymentInterval": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PaymentInterval = uint64(extra) + + } + // t.PaymentIntervalIncrease (uint64) (uint64) + case "PaymentIntervalIncrease": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PaymentIntervalIncrease = uint64(extra) + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *QueryParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{161}); err != nil { + return err + } + + // t.PieceCID (cid.Cid) (struct) + if len("PieceCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceCID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PieceCID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PieceCID")); err != nil { + return err + } + + if t.PieceCID == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PieceCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) + } + } + + return nil +} + +func (t *QueryParams) UnmarshalCBOR(r io.Reader) (err error) { + *t = QueryParams{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("QueryParams: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.PieceCID (cid.Cid) (struct) + case "PieceCID": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } + + t.PieceCID = &c + } + + 
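	// If the encoded value was CBOR null (0xf6), the byte has already been consumed above
	// and t.PieceCID is left nil, which is how the optional PieceCID round-trips.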
} + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DealPayment) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{163}); err != nil { + return err + } + + // t.ID (legacyretrievaltypes.DealID) (uint64) + if len("ID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("ID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ID)); err != nil { + return err + } + + // t.PaymentChannel (address.Address) (struct) + if len("PaymentChannel") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentChannel\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PaymentChannel"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PaymentChannel")); err != nil { + return err + } + + if err := t.PaymentChannel.MarshalCBOR(cw); err != nil { + return err + } + + // t.PaymentVoucher (paych.SignedVoucher) (struct) + if len("PaymentVoucher") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentVoucher\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PaymentVoucher"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PaymentVoucher")); err != nil { + return err + } + + if err := t.PaymentVoucher.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *DealPayment) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealPayment{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealPayment: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.ID (legacyretrievaltypes.DealID) (uint64) + case "ID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.ID = DealID(extra) + + } + // t.PaymentChannel (address.Address) (struct) + case "PaymentChannel": + + { + + if err := t.PaymentChannel.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentChannel: %w", err) + } + + } + // t.PaymentVoucher (paych.SignedVoucher) (struct) + case "PaymentVoucher": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.PaymentVoucher = new(paych.SignedVoucher) + if err := t.PaymentVoucher.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentVoucher pointer: %w", err) + } + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *ClientDealState) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := 
cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{181}); err != nil { + return err + } + + // t.Sender (peer.ID) (string) + if len("Sender") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Sender\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Sender"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Sender")); err != nil { + return err + } + + if len(t.Sender) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Sender was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Sender))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Sender)); err != nil { + return err + } + + // t.Status (legacyretrievaltypes.DealStatus) (uint64) + if len("Status") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Status\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Status"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Status")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Message)); err != nil { + return err + } + + // t.StoreID (uint64) (uint64) + if len("StoreID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"StoreID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("StoreID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("StoreID")); err != nil { + return err + } + + if t.StoreID == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(*t.StoreID)); err != nil { + return err + } + } + + // t.ChannelID (datatransfer.ChannelID) (struct) + if len("ChannelID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ChannelID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ChannelID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("ChannelID")); err != nil { + return err + } + + if err := t.ChannelID.MarshalCBOR(cw); err != nil { + return err + } + + // t.FundsSpent (big.Int) (struct) + if len("FundsSpent") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FundsSpent\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FundsSpent"))); err != nil { + return err + } + if _, err := cw.WriteString(string("FundsSpent")); err != nil { + return err + } + + if err := t.FundsSpent.MarshalCBOR(cw); err != nil { + return err + } + + // t.TotalFunds (big.Int) (struct) + if len("TotalFunds") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TotalFunds\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("TotalFunds"))); err != nil { + return err + } + if _, err := 
cw.WriteString(string("TotalFunds")); err != nil { + return err + } + + if err := t.TotalFunds.MarshalCBOR(cw); err != nil { + return err + } + + // t.WaitMsgCID (cid.Cid) (struct) + if len("WaitMsgCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"WaitMsgCID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("WaitMsgCID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("WaitMsgCID")); err != nil { + return err + } + + if t.WaitMsgCID == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.WaitMsgCID); err != nil { + return xerrors.Errorf("failed to write cid field t.WaitMsgCID: %w", err) + } + } + + // t.MinerWallet (address.Address) (struct) + if len("MinerWallet") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MinerWallet\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("MinerWallet"))); err != nil { + return err + } + if _, err := cw.WriteString(string("MinerWallet")); err != nil { + return err + } + + if err := t.MinerWallet.MarshalCBOR(cw); err != nil { + return err + } + + // t.PaymentInfo (legacyretrievaltypes.PaymentInfo) (struct) + if len("PaymentInfo") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentInfo\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PaymentInfo"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PaymentInfo")); err != nil { + return err + } + + if err := t.PaymentInfo.MarshalCBOR(cw); err != nil { + return err + } + + // t.BytesPaidFor (uint64) (uint64) + if len("BytesPaidFor") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"BytesPaidFor\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("BytesPaidFor"))); err != nil { + return err + } + if _, err := cw.WriteString(string("BytesPaidFor")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.BytesPaidFor)); err != nil { + return err + } + + // t.ClientWallet (address.Address) (struct) + if len("ClientWallet") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ClientWallet\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ClientWallet"))); err != nil { + return err + } + if _, err := cw.WriteString(string("ClientWallet")); err != nil { + return err + } + + if err := t.ClientWallet.MarshalCBOR(cw); err != nil { + return err + } + + // t.DealProposal (legacyretrievaltypes.DealProposal) (struct) + if len("DealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealProposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealProposal"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealProposal")); err != nil { + return err + } + + if err := t.DealProposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.TotalReceived (uint64) (uint64) + if len("TotalReceived") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TotalReceived\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("TotalReceived"))); err != nil { + return err + } + if _, err := cw.WriteString(string("TotalReceived")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TotalReceived)); err != nil { + return err + } + + // t.LegacyProtocol (bool) (bool) + if 
len("LegacyProtocol") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"LegacyProtocol\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("LegacyProtocol"))); err != nil { + return err + } + if _, err := cw.WriteString(string("LegacyProtocol")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.LegacyProtocol); err != nil { + return err + } + + // t.CurrentInterval (uint64) (uint64) + if len("CurrentInterval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CurrentInterval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("CurrentInterval"))); err != nil { + return err + } + if _, err := cw.WriteString(string("CurrentInterval")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.CurrentInterval)); err != nil { + return err + } + + // t.UnsealFundsPaid (big.Int) (struct) + if len("UnsealFundsPaid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"UnsealFundsPaid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("UnsealFundsPaid"))); err != nil { + return err + } + if _, err := cw.WriteString(string("UnsealFundsPaid")); err != nil { + return err + } + + if err := t.UnsealFundsPaid.MarshalCBOR(cw); err != nil { + return err + } + + // t.PaymentRequested (big.Int) (struct) + if len("PaymentRequested") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentRequested\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PaymentRequested"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PaymentRequested")); err != nil { + return err + } + + if err := t.PaymentRequested.MarshalCBOR(cw); err != nil { + return err + } + + // t.VoucherShortfall (big.Int) (struct) + if len("VoucherShortfall") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"VoucherShortfall\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("VoucherShortfall"))); err != nil { + return err + } + if _, err := cw.WriteString(string("VoucherShortfall")); err != nil { + return err + } + + if err := t.VoucherShortfall.MarshalCBOR(cw); err != nil { + return err + } + + // t.AllBlocksReceived (bool) (bool) + if len("AllBlocksReceived") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"AllBlocksReceived\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("AllBlocksReceived"))); err != nil { + return err + } + if _, err := cw.WriteString(string("AllBlocksReceived")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.AllBlocksReceived); err != nil { + return err + } + + // t.LastPaymentRequested (bool) (bool) + if len("LastPaymentRequested") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"LastPaymentRequested\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("LastPaymentRequested"))); err != nil { + return err + } + if _, err := cw.WriteString(string("LastPaymentRequested")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.LastPaymentRequested); err != nil { + return err + } + return nil +} + +func (t *ClientDealState) UnmarshalCBOR(r io.Reader) (err error) { + *t = ClientDealState{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return 
fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("ClientDealState: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Sender (peer.ID) (string) + case "Sender": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Sender = peer.ID(sval) + } + // t.Status (legacyretrievaltypes.DealStatus) (uint64) + case "Status": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = DealStatus(extra) + + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.StoreID (uint64) (uint64) + case "StoreID": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + typed := uint64(extra) + t.StoreID = &typed + } + + } + // t.ChannelID (datatransfer.ChannelID) (struct) + case "ChannelID": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.ChannelID = new(datatransfer.ChannelID) + if err := t.ChannelID.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ChannelID pointer: %w", err) + } + } + + } + // t.FundsSpent (big.Int) (struct) + case "FundsSpent": + + { + + if err := t.FundsSpent.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.FundsSpent: %w", err) + } + + } + // t.TotalFunds (big.Int) (struct) + case "TotalFunds": + + { + + if err := t.TotalFunds.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.TotalFunds: %w", err) + } + + } + // t.WaitMsgCID (cid.Cid) (struct) + case "WaitMsgCID": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.WaitMsgCID: %w", err) + } + + t.WaitMsgCID = &c + } + + } + // t.MinerWallet (address.Address) (struct) + case "MinerWallet": + + { + + if err := t.MinerWallet.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.MinerWallet: %w", err) + } + + } + // t.PaymentInfo (legacyretrievaltypes.PaymentInfo) (struct) + case "PaymentInfo": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.PaymentInfo = new(PaymentInfo) + if err := t.PaymentInfo.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentInfo pointer: %w", err) + } + } + + } + // t.BytesPaidFor (uint64) (uint64) + case "BytesPaidFor": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.BytesPaidFor = uint64(extra) + + } + // t.ClientWallet (address.Address) (struct) + case "ClientWallet": + + { + + if err := 
t.ClientWallet.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ClientWallet: %w", err) + } + + } + // t.DealProposal (legacyretrievaltypes.DealProposal) (struct) + case "DealProposal": + + { + + if err := t.DealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealProposal: %w", err) + } + + } + // t.TotalReceived (uint64) (uint64) + case "TotalReceived": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.TotalReceived = uint64(extra) + + } + // t.LegacyProtocol (bool) (bool) + case "LegacyProtocol": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.LegacyProtocol = false + case 21: + t.LegacyProtocol = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.CurrentInterval (uint64) (uint64) + case "CurrentInterval": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.CurrentInterval = uint64(extra) + + } + // t.UnsealFundsPaid (big.Int) (struct) + case "UnsealFundsPaid": + + { + + if err := t.UnsealFundsPaid.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.UnsealFundsPaid: %w", err) + } + + } + // t.PaymentRequested (big.Int) (struct) + case "PaymentRequested": + + { + + if err := t.PaymentRequested.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentRequested: %w", err) + } + + } + // t.VoucherShortfall (big.Int) (struct) + case "VoucherShortfall": + + { + + if err := t.VoucherShortfall.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.VoucherShortfall: %w", err) + } + + } + // t.AllBlocksReceived (bool) (bool) + case "AllBlocksReceived": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.AllBlocksReceived = false + case 21: + t.AllBlocksReceived = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.LastPaymentRequested (bool) (bool) + case "LastPaymentRequested": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.LastPaymentRequested = false + case 21: + t.LastPaymentRequested = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *ProviderDealState) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{171}); err != nil { + return err + } + + // t.Status (legacyretrievaltypes.DealStatus) (uint64) + if len("Status") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Status\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Status"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Status")); err != nil { + return err + } + + if err := 
cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Message)); err != nil { + return err + } + + // t.StoreID (uint64) (uint64) + if len("StoreID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"StoreID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("StoreID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("StoreID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StoreID)); err != nil { + return err + } + + // t.Receiver (peer.ID) (string) + if len("Receiver") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Receiver\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Receiver"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Receiver")); err != nil { + return err + } + + if len(t.Receiver) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Receiver was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Receiver))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Receiver)); err != nil { + return err + } + + // t.ChannelID (datatransfer.ChannelID) (struct) + if len("ChannelID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ChannelID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ChannelID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("ChannelID")); err != nil { + return err + } + + if err := t.ChannelID.MarshalCBOR(cw); err != nil { + return err + } + + // t.PieceInfo (piecestore.PieceInfo) (struct) + if len("PieceInfo") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceInfo\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PieceInfo"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PieceInfo")); err != nil { + return err + } + + if err := t.PieceInfo.MarshalCBOR(cw); err != nil { + return err + } + + // t.TotalSent (uint64) (uint64) + if len("TotalSent") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TotalSent\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("TotalSent"))); err != nil { + return err + } + if _, err := cw.WriteString(string("TotalSent")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TotalSent)); err != nil { + return err + } + + // t.DealProposal (legacyretrievaltypes.DealProposal) (struct) + if len("DealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealProposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealProposal"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealProposal")); err != nil { + return err + } + + if err := 
t.DealProposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.FundsReceived (big.Int) (struct) + if len("FundsReceived") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FundsReceived\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FundsReceived"))); err != nil { + return err + } + if _, err := cw.WriteString(string("FundsReceived")); err != nil { + return err + } + + if err := t.FundsReceived.MarshalCBOR(cw); err != nil { + return err + } + + // t.LegacyProtocol (bool) (bool) + if len("LegacyProtocol") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"LegacyProtocol\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("LegacyProtocol"))); err != nil { + return err + } + if _, err := cw.WriteString(string("LegacyProtocol")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.LegacyProtocol); err != nil { + return err + } + + // t.CurrentInterval (uint64) (uint64) + if len("CurrentInterval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CurrentInterval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("CurrentInterval"))); err != nil { + return err + } + if _, err := cw.WriteString(string("CurrentInterval")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.CurrentInterval)); err != nil { + return err + } + + return nil +} + +func (t *ProviderDealState) UnmarshalCBOR(r io.Reader) (err error) { + *t = ProviderDealState{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("ProviderDealState: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Status (legacyretrievaltypes.DealStatus) (uint64) + case "Status": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = DealStatus(extra) + + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.StoreID (uint64) (uint64) + case "StoreID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.StoreID = uint64(extra) + + } + // t.Receiver (peer.ID) (string) + case "Receiver": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Receiver = peer.ID(sval) + } + // t.ChannelID (datatransfer.ChannelID) (struct) + case "ChannelID": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.ChannelID = new(datatransfer.ChannelID) + if err := t.ChannelID.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ChannelID pointer: %w", err) + } + } + + } + // t.PieceInfo (piecestore.PieceInfo) (struct) + case "PieceInfo": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != 
cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.PieceInfo = new(piecestore.PieceInfo) + if err := t.PieceInfo.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PieceInfo pointer: %w", err) + } + } + + } + // t.TotalSent (uint64) (uint64) + case "TotalSent": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.TotalSent = uint64(extra) + + } + // t.DealProposal (legacyretrievaltypes.DealProposal) (struct) + case "DealProposal": + + { + + if err := t.DealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealProposal: %w", err) + } + + } + // t.FundsReceived (big.Int) (struct) + case "FundsReceived": + + { + + if err := t.FundsReceived.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.FundsReceived: %w", err) + } + + } + // t.LegacyProtocol (bool) (bool) + case "LegacyProtocol": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.LegacyProtocol = false + case 21: + t.LegacyProtocol = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.CurrentInterval (uint64) (uint64) + case "CurrentInterval": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.CurrentInterval = uint64(extra) + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *PaymentInfo) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.Lane (uint64) (uint64) + if len("Lane") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Lane\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Lane"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Lane")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Lane)); err != nil { + return err + } + + // t.PayCh (address.Address) (struct) + if len("PayCh") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PayCh\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PayCh"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PayCh")); err != nil { + return err + } + + if err := t.PayCh.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) (err error) { + *t = PaymentInfo{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("PaymentInfo: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Lane (uint64) (uint64) + case "Lane": + + { + + maj, 
extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Lane = uint64(extra) + + } + // t.PayCh (address.Address) (struct) + case "PayCh": + + { + + if err := t.PayCh.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PayCh: %w", err) + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *RetrievalPeer) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{163}); err != nil { + return err + } + + // t.ID (peer.ID) (string) + if len("ID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("ID")); err != nil { + return err + } + + if len(t.ID) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.ID was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.ID))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.ID)); err != nil { + return err + } + + // t.Address (address.Address) (struct) + if len("Address") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Address\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Address"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Address")); err != nil { + return err + } + + if err := t.Address.MarshalCBOR(cw); err != nil { + return err + } + + // t.PieceCID (cid.Cid) (struct) + if len("PieceCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceCID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PieceCID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PieceCID")); err != nil { + return err + } + + if t.PieceCID == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PieceCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) + } + } + + return nil +} + +func (t *RetrievalPeer) UnmarshalCBOR(r io.Reader) (err error) { + *t = RetrievalPeer{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("RetrievalPeer: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.ID (peer.ID) (string) + case "ID": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.ID = peer.ID(sval) + } + // t.Address (address.Address) (struct) + case "Address": + + { + + if err := t.Address.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Address: %w", err) + } + + } + // t.PieceCID (cid.Cid) (struct) + case "PieceCID": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := 
cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } + + t.PieceCID = &c + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *Ask) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{164}); err != nil { + return err + } + + // t.UnsealPrice (big.Int) (struct) + if len("UnsealPrice") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"UnsealPrice\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("UnsealPrice"))); err != nil { + return err + } + if _, err := cw.WriteString(string("UnsealPrice")); err != nil { + return err + } + + if err := t.UnsealPrice.MarshalCBOR(cw); err != nil { + return err + } + + // t.PricePerByte (big.Int) (struct) + if len("PricePerByte") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PricePerByte\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PricePerByte"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PricePerByte")); err != nil { + return err + } + + if err := t.PricePerByte.MarshalCBOR(cw); err != nil { + return err + } + + // t.PaymentInterval (uint64) (uint64) + if len("PaymentInterval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentInterval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PaymentInterval"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PaymentInterval")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PaymentInterval)); err != nil { + return err + } + + // t.PaymentIntervalIncrease (uint64) (uint64) + if len("PaymentIntervalIncrease") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentIntervalIncrease\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PaymentIntervalIncrease"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PaymentIntervalIncrease")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PaymentIntervalIncrease)); err != nil { + return err + } + + return nil +} + +func (t *Ask) UnmarshalCBOR(r io.Reader) (err error) { + *t = Ask{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("Ask: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.UnsealPrice (big.Int) (struct) + case "UnsealPrice": + + { + + if err := t.UnsealPrice.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.UnsealPrice: %w", err) + } + + } + // t.PricePerByte (big.Int) (struct) + case "PricePerByte": + + { + + if err := t.PricePerByte.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PricePerByte: %w", err) + } + + } + // t.PaymentInterval (uint64) (uint64) + case "PaymentInterval": + + { + + maj, extra, err = cr.ReadHeader() + if err 
!= nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PaymentInterval = uint64(extra) + + } + // t.PaymentIntervalIncrease (uint64) (uint64) + case "PaymentIntervalIncrease": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PaymentIntervalIncrease = uint64(extra) + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} diff --git a/storagemarket/dealfilter/cli.go b/storagemarket/dealfilter/cli.go index 96198d1d2..eb4536c75 100644 --- a/storagemarket/dealfilter/cli.go +++ b/storagemarket/dealfilter/cli.go @@ -6,7 +6,7 @@ import ( "encoding/json" "os/exec" - "github.com/filecoin-project/boost-gfm/retrievalmarket" + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" "github.com/filecoin-project/boost/storagemarket/funds" "github.com/filecoin-project/boost/storagemarket/sealingpipeline" "github.com/filecoin-project/boost/storagemarket/storagespace" @@ -17,7 +17,7 @@ const agent = "boost" const jsonVersion = "2.2.0" type StorageDealFilter func(ctx context.Context, deal DealFilterParams) (bool, string, error) -type RetrievalDealFilter func(ctx context.Context, deal retrievalmarket.ProviderDealState) (bool, string, error) +type RetrievalDealFilter func(ctx context.Context, deal legacyretrievaltypes.ProviderDealState) (bool, string, error) func CliStorageDealFilter(cmd string) StorageDealFilter { return func(ctx context.Context, deal DealFilterParams) (bool, string, error) { @@ -43,9 +43,9 @@ func CliStorageDealFilter(cmd string) StorageDealFilter { } func CliRetrievalDealFilter(cmd string) RetrievalDealFilter { - return func(ctx context.Context, deal retrievalmarket.ProviderDealState) (bool, string, error) { + return func(ctx context.Context, deal legacyretrievaltypes.ProviderDealState) (bool, string, error) { d := struct { - retrievalmarket.ProviderDealState + legacyretrievaltypes.ProviderDealState DealType string FormatVersion string Agent string diff --git a/storagemarket/helper.go b/storagemarket/helper.go index bd693e3ca..8fd1b1446 100644 --- a/storagemarket/helper.go +++ b/storagemarket/helper.go @@ -6,7 +6,7 @@ import ( "errors" "fmt" - "github.com/filecoin-project/boost-gfm/storagemarket" + "github.com/filecoin-project/boost/storagemarket/types" "github.com/filecoin-project/go-state-types/abi" market8 "github.com/filecoin-project/go-state-types/builtin/v9/market" "github.com/filecoin-project/go-state-types/exitcode" @@ -36,7 +36,7 @@ type CurrentDealInfo struct { PublishMsgTipSet ctypes.TipSetKey } -func (c *ChainDealManager) WaitForPublishDeals(ctx context.Context, publishCid cid.Cid, proposal market8.DealProposal) (*storagemarket.PublishDealsWaitResult, error) { +func (c *ChainDealManager) WaitForPublishDeals(ctx context.Context, publishCid cid.Cid, proposal market8.DealProposal) (*types.PublishDealsWaitResult, error) { // Wait for deal to be published (plus additional time for confidence) receipt, err := c.fullnodeApi.StateWaitMsg(ctx, publishCid, c.cfg.PublishDealsConfidence, api.LookbackNoLimit, true) if err != nil { @@ -58,7 +58,7 @@ func (c *ChainDealManager) WaitForPublishDeals(ctx context.Context, publishCid c return nil, fmt.Errorf("WaitForPublishDeals getting deal info errored: %w", err) } - return &storagemarket.PublishDealsWaitResult{DealID: res.DealID, FinalCid: receipt.Message}, nil + 
return &types.PublishDealsWaitResult{DealID: res.DealID, FinalCid: receipt.Message}, nil } // GetCurrentDealInfo gets the current deal state and deal ID. diff --git a/storagemarket/lp2pimpl/net.go b/storagemarket/lp2pimpl/net.go index b764498b6..7c8708365 100644 --- a/storagemarket/lp2pimpl/net.go +++ b/storagemarket/lp2pimpl/net.go @@ -6,15 +6,15 @@ import ( "fmt" "time" - "github.com/filecoin-project/boost-gfm/shared" - gfm_storagemarket "github.com/filecoin-project/boost-gfm/storagemarket" - gfm_migration "github.com/filecoin-project/boost-gfm/storagemarket/migrations" - gfm_network "github.com/filecoin-project/boost-gfm/storagemarket/network" "github.com/filecoin-project/boost/api" "github.com/filecoin-project/boost/db" + "github.com/filecoin-project/boost/markets/shared" "github.com/filecoin-project/boost/storagemarket" "github.com/filecoin-project/boost/storagemarket/sealingpipeline" "github.com/filecoin-project/boost/storagemarket/types" + "github.com/filecoin-project/boost/storagemarket/types/legacytypes" + mig "github.com/filecoin-project/boost/storagemarket/types/legacytypes/migrations" + gfm_network "github.com/filecoin-project/boost/storagemarket/types/legacytypes/network" "github.com/filecoin-project/go-address" cborutil "github.com/filecoin-project/go-cbor-util" "github.com/filecoin-project/go-state-types/crypto" @@ -205,11 +205,9 @@ func (p *DealProvider) Start(ctx context.Context) { p.host.SetStreamHandler(DealStatusV12ProtocolID, p.handleNewDealStatusStream) // Handle legacy deal stream here and reject all legacy deals - if !p.enableLegacyDeals { - p.host.SetStreamHandler(gfm_storagemarket.DealProtocolID101, p.handleLegacyDealStream) - p.host.SetStreamHandler(gfm_storagemarket.DealProtocolID110, p.handleLegacyDealStream) - p.host.SetStreamHandler(gfm_storagemarket.DealProtocolID111, p.handleLegacyDealStream) - } + p.host.SetStreamHandler(legacytypes.DealProtocolID101, p.handleLegacyDealStream) + p.host.SetStreamHandler(legacytypes.DealProtocolID110, p.handleLegacyDealStream) + p.host.SetStreamHandler(legacytypes.DealProtocolID111, p.handleLegacyDealStream) } func (p *DealProvider) Stop() { @@ -416,13 +414,13 @@ func (p *DealProvider) handleLegacyDealStream(s network.Stream) { rejMsg := fmt.Sprintf("deal proposals made over the legacy %s protocol are deprecated"+ " - please use the %s deal proposal protocol", s.Protocol(), DealProtocolv121ID) - const rejState = gfm_storagemarket.StorageDealProposalRejected + const rejState = 2 var signedResponse typegen.CBORMarshaler _ = s.SetReadDeadline(time.Now().Add(providerReadDeadline)) switch s.Protocol() { - case gfm_storagemarket.DealProtocolID101: - var prop gfm_migration.Proposal0 + case legacytypes.DealProtocolID101: + var prop mig.Proposal0 err := prop.UnmarshalCBOR(s) _ = s.SetReadDeadline(time.Time{}) // Clear read deadline so conn doesn't get closed if err != nil { @@ -436,17 +434,17 @@ func (p *DealProvider) handleLegacyDealStream(s network.Stream) { return } - resp := gfm_migration.Response0{State: rejState, Message: rejMsg, Proposal: pcid} + resp := mig.Response0{State: rejState, Message: rejMsg, Proposal: pcid} sig, err := p.signLegacyResponse(&resp) if err != nil { reqLog.Errorf("getting signed response: %s", err) return } - signedResponse = &gfm_migration.SignedResponse0{Response: resp, Signature: sig} + signedResponse = &mig.SignedResponse0{Response: resp, Signature: sig} - case gfm_storagemarket.DealProtocolID110: - var prop gfm_migration.Proposal1 + case legacytypes.DealProtocolID110: + var prop mig.Proposal1 err := 
prop.UnmarshalCBOR(s) _ = s.SetReadDeadline(time.Time{}) // Clear read deadline so conn doesn't get closed if err != nil { @@ -469,7 +467,7 @@ func (p *DealProvider) handleLegacyDealStream(s network.Stream) { signedResponse = &gfm_network.SignedResponse{Response: resp, Signature: sig} - case gfm_storagemarket.DealProtocolID111: + case legacytypes.DealProtocolID111: var prop gfm_network.Proposal err := prop.UnmarshalCBOR(s) _ = s.SetReadDeadline(time.Time{}) // Clear read deadline so conn doesn't get closed diff --git a/storagemarket/provider.go b/storagemarket/provider.go index d48594af4..7f3097369 100644 --- a/storagemarket/provider.go +++ b/storagemarket/provider.go @@ -10,7 +10,6 @@ import ( "sync" "time" - "github.com/filecoin-project/boost-gfm/storagemarket" "github.com/filecoin-project/boost/api" "github.com/filecoin-project/boost/build" "github.com/filecoin-project/boost/db" @@ -24,10 +23,12 @@ import ( "github.com/filecoin-project/boost/storagemarket/types" smtypes "github.com/filecoin-project/boost/storagemarket/types" "github.com/filecoin-project/boost/storagemarket/types/dealcheckpoints" + "github.com/filecoin-project/boost/storagemarket/types/legacytypes" "github.com/filecoin-project/boost/transport" "github.com/filecoin-project/boostd-data/shared/tracing" "github.com/filecoin-project/dagstore" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" lapi "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/v1api" sealing "github.com/filecoin-project/lotus/storage/pipeline" @@ -56,6 +57,13 @@ type SealingPipelineCache struct { CacheError error } +// PackingResult returns information about how a deal was put into a sector +type PackingResult struct { + SectorNumber abi.SectorNumber + Offset abi.PaddedPieceSize + Size abi.PaddedPieceSize +} + // DagstoreShardRegistry provides the one method from the Dagstore that we use // in deal execution: registering a shard type DagstoreShardRegistry interface { @@ -227,8 +235,8 @@ func (p *Provider) DealBySignedProposalCid(ctx context.Context, propCid cid.Cid) return deal, nil } -func (p *Provider) GetAsk() *storagemarket.SignedStorageAsk { - return p.askGetter.GetAsk() +func (p *Provider) GetAsk() *legacytypes.SignedStorageAsk { + return p.askGetter.GetAsk(p.Address) } // ImportOfflineDealData is called when the Storage Provider imports data for @@ -632,7 +640,7 @@ func (p *Provider) CancelDealDataTransfer(dealUuid uuid.UUID) error { return err } -func (p *Provider) AddPieceToSector(ctx context.Context, deal smtypes.ProviderDealState, pieceData io.Reader) (*storagemarket.PackingResult, error) { +func (p *Provider) AddPieceToSector(ctx context.Context, deal smtypes.ProviderDealState, pieceData io.Reader) (*PackingResult, error) { // Sanity check - we must have published the deal before handing it off // to the sealing subsystem if deal.PublishCID == nil { @@ -675,7 +683,7 @@ func (p *Provider) AddPieceToSector(ctx context.Context, deal smtypes.ProviderDe } p.dealLogger.Infow(deal.DealUuid, "added new deal to sector", "sector", sectorNum.String()) - return &storagemarket.PackingResult{ + return &PackingResult{ SectorNumber: sectorNum, Offset: offset, Size: pieceSize.Padded(), diff --git a/storagemarket/provider_test.go b/storagemarket/provider_test.go index 63400954d..b5ea4ed64 100644 --- a/storagemarket/provider_test.go +++ b/storagemarket/provider_test.go @@ -16,8 +16,6 @@ import ( "testing" "time" - "github.com/filecoin-project/boost-gfm/shared_testutil" - 
"github.com/filecoin-project/boost-gfm/storagemarket" "github.com/filecoin-project/boost/db" "github.com/filecoin-project/boost/fundmanager" "github.com/filecoin-project/boost/piecedirectory" @@ -28,6 +26,7 @@ import ( "github.com/filecoin-project/boost/storagemarket/smtestutil" "github.com/filecoin-project/boost/storagemarket/types" "github.com/filecoin-project/boost/storagemarket/types/dealcheckpoints" + "github.com/filecoin-project/boost/storagemarket/types/legacytypes" "github.com/filecoin-project/boost/testutil" "github.com/filecoin-project/boost/transport" "github.com/filecoin-project/boost/transport/httptransport" @@ -848,12 +847,12 @@ func TestDealAskValidation(t *testing.T) { ctx := context.Background() tcs := map[string]struct { - ask *storagemarket.StorageAsk + ask *legacytypes.StorageAsk dbuilder func(h *ProviderHarness) *testDeal expectedErr string }{ "fails if price below minimum for unverified deal": { - ask: &storagemarket.StorageAsk{ + ask: &legacytypes.StorageAsk{ Price: abi.NewTokenAmount(100000000000), }, dbuilder: func(h *ProviderHarness) *testDeal { @@ -863,7 +862,7 @@ func TestDealAskValidation(t *testing.T) { expectedErr: "storage price per epoch less than asking price", }, "fails if price below minimum for verified deal": { - ask: &storagemarket.StorageAsk{ + ask: &legacytypes.StorageAsk{ Price: abi.NewTokenAmount(0), VerifiedPrice: abi.NewTokenAmount(100000000000), }, @@ -874,7 +873,7 @@ func TestDealAskValidation(t *testing.T) { expectedErr: "storage price per epoch less than asking price", }, "fails if piece size below minimum": { - ask: &storagemarket.StorageAsk{ + ask: &legacytypes.StorageAsk{ Price: abi.NewTokenAmount(0), MinPieceSize: abi.PaddedPieceSize(1000000000), }, @@ -885,7 +884,7 @@ func TestDealAskValidation(t *testing.T) { expectedErr: "piece size less than minimum required size", }, "fails if piece size above maximum": { - ask: &storagemarket.StorageAsk{ + ask: &legacytypes.StorageAsk{ Price: abi.NewTokenAmount(0), MaxPieceSize: abi.PaddedPieceSize(1), }, @@ -920,14 +919,14 @@ func TestDealVerification(t *testing.T) { ctx := context.Background() tcs := map[string]struct { - ask *storagemarket.StorageAsk + ask *legacytypes.StorageAsk dbuilder func(t *testing.T, h *ProviderHarness) *testDeal expectedErr string expect func(h *ProviderHarness) opts []harnessOpt }{ "fails if client does not have enough datacap for verified deal": { - ask: &storagemarket.StorageAsk{ + ask: &legacytypes.StorageAsk{ VerifiedPrice: abi.NewTokenAmount(0), }, dbuilder: func(_ *testing.T, h *ProviderHarness) *testDeal { @@ -941,7 +940,7 @@ func TestDealVerification(t *testing.T) { expectedErr: "verified deal DataCap 1 too small", }, "fails if can't fetch datacap for verified deal": { - ask: &storagemarket.StorageAsk{ + ask: &legacytypes.StorageAsk{ VerifiedPrice: abi.NewTokenAmount(0), }, dbuilder: func(_ *testing.T, h *ProviderHarness) *testDeal { @@ -954,7 +953,7 @@ func TestDealVerification(t *testing.T) { expectedErr: "getting verified datacap", }, "fails if client does NOT have enough balance for deal": { - ask: &storagemarket.StorageAsk{ + ask: &legacytypes.StorageAsk{ Price: abi.NewTokenAmount(0), }, dbuilder: func(_ *testing.T, h *ProviderHarness) *testDeal { @@ -964,7 +963,7 @@ func TestDealVerification(t *testing.T) { expectedErr: "funds in escrow 0 not enough", }, "fails if client signature is not valid": { - ask: &storagemarket.StorageAsk{ + ask: &legacytypes.StorageAsk{ Price: abi.NewTokenAmount(0), }, dbuilder: func(_ *testing.T, h *ProviderHarness) *testDeal { 
@@ -976,7 +975,7 @@ func TestDealVerification(t *testing.T) { expectedErr: "invalid signature", }, "fails if client signature verification fails": { - ask: &storagemarket.StorageAsk{ + ask: &legacytypes.StorageAsk{ Price: abi.NewTokenAmount(0), }, dbuilder: func(_ *testing.T, h *ProviderHarness) *testDeal { @@ -988,7 +987,7 @@ func TestDealVerification(t *testing.T) { expectedErr: "validating signature", }, "fails if proposed provider collateral below minimum": { - ask: &storagemarket.StorageAsk{ + ask: &legacytypes.StorageAsk{ Price: abi.NewTokenAmount(0), }, dbuilder: func(_ *testing.T, h *ProviderHarness) *testDeal { @@ -997,7 +996,7 @@ func TestDealVerification(t *testing.T) { expectedErr: "proposed provider collateral 0 below minimum", }, "fails if proposed provider collateral above maximum": { - ask: &storagemarket.StorageAsk{ + ask: &legacytypes.StorageAsk{ Price: abi.NewTokenAmount(0), }, dbuilder: func(_ *testing.T, h *ProviderHarness) *testDeal { @@ -1007,7 +1006,7 @@ func TestDealVerification(t *testing.T) { }, "fails if provider address does not match": { - ask: &storagemarket.StorageAsk{ + ask: &legacytypes.StorageAsk{ Price: abi.NewTokenAmount(0), }, dbuilder: func(t *testing.T, h *ProviderHarness) *testDeal { @@ -1018,7 +1017,7 @@ func TestDealVerification(t *testing.T) { expectedErr: "incorrect provider for deal", }, "proposal piece cid has wrong prefix": { - ask: &storagemarket.StorageAsk{ + ask: &legacytypes.StorageAsk{ Price: abi.NewTokenAmount(0), }, dbuilder: func(t *testing.T, h *ProviderHarness) *testDeal { @@ -1027,7 +1026,7 @@ func TestDealVerification(t *testing.T) { expectedErr: "proposal PieceCID had wrong prefix", }, "proposal piece cid undefined": { - ask: &storagemarket.StorageAsk{ + ask: &legacytypes.StorageAsk{ Price: abi.NewTokenAmount(0), }, dbuilder: func(t *testing.T, h *ProviderHarness) *testDeal { @@ -1036,7 +1035,7 @@ func TestDealVerification(t *testing.T) { expectedErr: "proposal PieceCID undefined", }, "proposal end 9 before proposal start 10": { - ask: &storagemarket.StorageAsk{ + ask: &legacytypes.StorageAsk{ Price: abi.NewTokenAmount(0), }, dbuilder: func(t *testing.T, h *ProviderHarness) *testDeal { @@ -1045,7 +1044,7 @@ func TestDealVerification(t *testing.T) { expectedErr: "proposal end 9 before proposal start 10", }, "deal start epoch has already elapsed": { - ask: &storagemarket.StorageAsk{ + ask: &legacytypes.StorageAsk{ Price: abi.NewTokenAmount(0), }, dbuilder: func(t *testing.T, h *ProviderHarness) *testDeal { @@ -1054,7 +1053,7 @@ func TestDealVerification(t *testing.T) { expectedErr: "deal start epoch -1 has already elapsed", }, "deal piece size invalid": { - ask: &storagemarket.StorageAsk{ + ask: &legacytypes.StorageAsk{ Price: abi.NewTokenAmount(0), }, dbuilder: func(t *testing.T, h *ProviderHarness) *testDeal { @@ -1063,7 +1062,7 @@ func TestDealVerification(t *testing.T) { expectedErr: "proposal piece size is invalid", }, "deal end epoch too far out": { - ask: &storagemarket.StorageAsk{ + ask: &legacytypes.StorageAsk{ Price: abi.NewTokenAmount(0), }, dbuilder: func(t *testing.T, h *ProviderHarness) *testDeal { @@ -1074,7 +1073,7 @@ func TestDealVerification(t *testing.T) { expectedErr: "invalid deal end epoch", }, "deal duration greater than max duration": { - ask: &storagemarket.StorageAsk{ + ask: &legacytypes.StorageAsk{ Price: abi.NewTokenAmount(0), }, dbuilder: func(t *testing.T, h *ProviderHarness) *testDeal { @@ -1084,7 +1083,7 @@ func TestDealVerification(t *testing.T) { expectedErr: "deal duration out of bounds", }, "deal 
duration less than min duration": { - ask: &storagemarket.StorageAsk{ + ask: &legacytypes.StorageAsk{ Price: abi.NewTokenAmount(0), }, dbuilder: func(t *testing.T, h *ProviderHarness) *testDeal { @@ -1373,8 +1372,7 @@ type ProviderHarness struct { Transport transport.Transport - SqlDB *sql.DB - DAGStore *shared_testutil.MockDagStoreWrapper + SqlDB *sql.DB } type ChainHeadFn func(ctx context.Context) (*chaintypes.TipSet, error) @@ -2380,21 +2378,20 @@ func (td *testDeal) assertDealFailedNonRecoverable(t *testing.T, ctx context.Con } type mockAskStore struct { - ask *storagemarket.StorageAsk + ask *legacytypes.StorageAsk } func (m *mockAskStore) SetAsk(price, verifiedPrice abi.TokenAmount, minPieceSize, maxPieceSize abi.PaddedPieceSize) { - m.ask = &storagemarket.StorageAsk{ + m.ask = &legacytypes.StorageAsk{ Price: price, VerifiedPrice: verifiedPrice, MinPieceSize: minPieceSize, MaxPieceSize: maxPieceSize, } - } -func (m *mockAskStore) GetAsk() *storagemarket.SignedStorageAsk { - return &storagemarket.SignedStorageAsk{ +func (m *mockAskStore) GetAsk(miner address.Address) *legacytypes.SignedStorageAsk { + return &legacytypes.SignedStorageAsk{ Ask: m.ask, } } diff --git a/storagemarket/smtestutil/mocks.go b/storagemarket/smtestutil/mocks.go index b618c4953..f48dc437c 100644 --- a/storagemarket/smtestutil/mocks.go +++ b/storagemarket/smtestutil/mocks.go @@ -4,12 +4,12 @@ import ( "bytes" "context" "fmt" - "github.com/filecoin-project/go-address" "io" "strings" "sync" - "github.com/filecoin-project/boost-gfm/storagemarket" + "github.com/filecoin-project/go-address" + pdtypes "github.com/filecoin-project/boost/piecedirectory/types" mock_piecedirectory "github.com/filecoin-project/boost/piecedirectory/types/mocks" mock_sealingpipeline "github.com/filecoin-project/boost/storagemarket/sealingpipeline/mock" @@ -131,8 +131,8 @@ func (mb *MinerStubBuilder) SetupNoOp() *MinerStubBuilder { return mb.publishCid, nil }).AnyTimes() - mb.stub.MockChainDealManager.EXPECT().WaitForPublishDeals(gomock.Any(), gomock.Eq(mb.publishCid), gomock.Eq(mb.dp.ClientDealProposal.Proposal)).DoAndReturn(func(_ context.Context, _ cid.Cid, _ market.DealProposal) (*storagemarket.PublishDealsWaitResult, error) { - return &storagemarket.PublishDealsWaitResult{ + mb.stub.MockChainDealManager.EXPECT().WaitForPublishDeals(gomock.Any(), gomock.Eq(mb.publishCid), gomock.Eq(mb.dp.ClientDealProposal.Proposal)).DoAndReturn(func(_ context.Context, _ cid.Cid, _ market.DealProposal) (*types.PublishDealsWaitResult, error) { + return &types.PublishDealsWaitResult{ DealID: mb.dealId, FinalCid: mb.finalPublishCid, }, nil @@ -241,7 +241,7 @@ func (mb *MinerStubBuilder) SetupPublishConfirm(blocking bool) *MinerStubBuilder } mb.stub.lk.Unlock() - mb.stub.MockChainDealManager.EXPECT().WaitForPublishDeals(gomock.Any(), gomock.Eq(mb.publishCid), gomock.Eq(mb.dp.ClientDealProposal.Proposal)).DoAndReturn(func(ctx context.Context, _ cid.Cid, _ market.DealProposal) (*storagemarket.PublishDealsWaitResult, error) { + mb.stub.MockChainDealManager.EXPECT().WaitForPublishDeals(gomock.Any(), gomock.Eq(mb.publishCid), gomock.Eq(mb.dp.ClientDealProposal.Proposal)).DoAndReturn(func(ctx context.Context, _ cid.Cid, _ market.DealProposal) (*types.PublishDealsWaitResult, error) { mb.stub.lk.Lock() ch := mb.stub.unblockWaitForPublish[mb.dp.DealUUID] mb.stub.lk.Unlock() @@ -257,7 +257,7 @@ func (mb *MinerStubBuilder) SetupPublishConfirm(blocking bool) *MinerStubBuilder return nil, ctx.Err() } - return &storagemarket.PublishDealsWaitResult{ + return 
&types.PublishDealsWaitResult{ DealID: mb.dealId, FinalCid: mb.finalPublishCid, }, nil @@ -267,7 +267,7 @@ func (mb *MinerStubBuilder) SetupPublishConfirm(blocking bool) *MinerStubBuilder } func (mb *MinerStubBuilder) SetupPublishConfirmFailure(err error) *MinerStubBuilder { - mb.stub.MockChainDealManager.EXPECT().WaitForPublishDeals(gomock.Any(), gomock.Eq(mb.publishCid), gomock.Eq(mb.dp.ClientDealProposal.Proposal)).DoAndReturn(func(_ context.Context, _ cid.Cid, _ market.DealProposal) (*storagemarket.PublishDealsWaitResult, error) { + mb.stub.MockChainDealManager.EXPECT().WaitForPublishDeals(gomock.Any(), gomock.Eq(mb.publishCid), gomock.Eq(mb.dp.ClientDealProposal.Proposal)).DoAndReturn(func(_ context.Context, _ cid.Cid, _ market.DealProposal) (*types.PublishDealsWaitResult, error) { return nil, err }) diff --git a/storagemarket/storedask/storedask.go b/storagemarket/storedask/storedask.go new file mode 100644 index 000000000..f9099952a --- /dev/null +++ b/storagemarket/storedask/storedask.go @@ -0,0 +1,215 @@ +package storedask + +import ( + "context" + "database/sql" + "errors" + "fmt" + "sync" + + "github.com/filecoin-project/boost/db" + "github.com/filecoin-project/boost/markets/shared" + "github.com/filecoin-project/boost/node/config" + "github.com/filecoin-project/boost/storagemarket/types/legacytypes" + "github.com/filecoin-project/go-address" + cborutil "github.com/filecoin-project/go-cbor-util" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" + logging "github.com/ipfs/go-log/v2" + "go.uber.org/fx" + "golang.org/x/xerrors" +) + +var log = logging.Logger("storedask") + +// DefaultPrice is the default price for unverified deals (in attoFil / GiB / Epoch) +var DefaultPrice = abi.NewTokenAmount(500000000) + +// DefaultVerifiedPrice is the default price for verified deals (in attoFil / GiB / Epoch) +var DefaultVerifiedPrice = abi.NewTokenAmount(50000000) + +// DefaultDuration is the default number of epochs a storage ask is in effect for +const DefaultDuration abi.ChainEpoch = 1000000 + +// DefaultMinPieceSize is the minimum accepted piece size for data +const DefaultMinPieceSize abi.PaddedPieceSize = 256 + +// DefaultMaxPieceSize is the default maximum accepted size for pieces for deals +// TODO: It would be nice to default this to the miner's sector size +const DefaultMaxPieceSize abi.PaddedPieceSize = 1 << 20 + +type StoredAsk struct { + askLk sync.RWMutex + asks map[address.Address]*legacytypes.SignedStorageAsk + fullNode api.FullNode + db *db.StorageAskDB +} + +// NewStoredAsk returns a new instance of StoredAsk +// It will initialize a new SignedStorageAsk on disk if one is not set +// Otherwise it loads the current SignedStorageAsk from disk +func NewStoredAsk(cfg *config.Boost) func(lc fx.Lifecycle, db *db.StorageAskDB, fullNode api.FullNode) (*StoredAsk, error) { + return func(lc fx.Lifecycle, db *db.StorageAskDB, fullNode api.FullNode) (*StoredAsk, error) { + s := &StoredAsk{ + fullNode: fullNode, + db: db, + asks: make(map[address.Address]*legacytypes.SignedStorageAsk), + } + + ctx := context.Background() + + var minerIDs []address.Address + miner, err := address.NewFromString(cfg.Wallets.Miner) + if err != nil { + return nil, fmt.Errorf("converting miner ID from config: %w", err) + } + minerIDs = append(minerIDs, miner) + + for _, m := range minerIDs { + ask, err := s.getSignedAsk(ctx, m) + if err != nil { + if errors.Is(err, 
sql.ErrNoRows) {
+					// If not found set everything to default
+					serr := s.SetAsk(ctx, DefaultPrice, DefaultVerifiedPrice, DefaultDuration, m)
+					if serr == nil {
+						continue
+					}
+					return nil, fmt.Errorf("setting default ask for miner id %s: %w", m.String(), serr)
+				}
+				return nil, fmt.Errorf("failed to initialise AskStore: %w", err)
+			}
+			s.asks[m] = &ask
+		}
+
+		return s, nil
+	}
+}
+
+func signBytes(ctx context.Context, signer address.Address, b []byte, f api.FullNode) (*crypto.Signature, error) {
+	signer, err := f.StateAccountKey(ctx, signer, types.EmptyTSK)
+	if err != nil {
+		return nil, err
+	}
+
+	localSignature, err := f.WalletSign(ctx, signer, b)
+	if err != nil {
+		return nil, err
+	}
+	return localSignature, nil
+}
+
+func getMinerWorkerAddress(ctx context.Context, maddr address.Address, tok shared.TipSetToken, f api.FullNode) (address.Address, error) {
+	tsk, err := types.TipSetKeyFromBytes(tok)
+	if err != nil {
+		return address.Undef, err
+	}
+
+	mi, err := f.StateMinerInfo(ctx, maddr, tsk)
+	if err != nil {
+		return address.Address{}, err
+	}
+	return mi.Worker, nil
+}
+
+func (s *StoredAsk) sign(ctx context.Context, ask *legacytypes.StorageAsk) (*crypto.Signature, error) {
+	tok, err := s.fullNode.ChainHead(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	return signMinerData(ctx, ask, ask.Miner, tok.Key().Bytes(), s.fullNode)
+}
+
+// signMinerData signs the given data structure with a signature for the given address
+func signMinerData(ctx context.Context, data interface{}, address address.Address, tok shared.TipSetToken, f api.FullNode) (*crypto.Signature, error) {
+	msg, err := cborutil.Dump(data)
+	if err != nil {
+		return nil, xerrors.Errorf("serializing: %w", err)
+	}
+
+	worker, err := getMinerWorkerAddress(ctx, address, tok, f)
+	if err != nil {
+		return nil, err
+	}
+
+	sig, err := signBytes(ctx, worker, msg, f)
+	if err != nil {
+		return nil, xerrors.Errorf("failed to sign: %w", err)
+	}
+	return sig, nil
+}
+
+func (s *StoredAsk) GetAsk(miner address.Address) *legacytypes.SignedStorageAsk {
+	s.askLk.RLock()
+	defer s.askLk.RUnlock()
+
+	return s.asks[miner]
+}
+
+func (s *StoredAsk) SetAsk(ctx context.Context, price abi.TokenAmount, verifiedPrice abi.TokenAmount, duration abi.ChainEpoch, miner address.Address, options ...legacytypes.StorageAskOption) error {
+	s.askLk.Lock()
+	defer s.askLk.Unlock()
+	var seqno uint64
+	minPieceSize := DefaultMinPieceSize
+	maxPieceSize := DefaultMaxPieceSize
+
+	oldAsk, ok := s.asks[miner]
+	if ok {
+		seqno = oldAsk.Ask.SeqNo + 1
+		minPieceSize = oldAsk.Ask.MinPieceSize
+		maxPieceSize = oldAsk.Ask.MaxPieceSize
+	}
+
+	ts, err := s.fullNode.ChainHead(ctx)
+	if err != nil {
+		return err
+	}
+	ask := &legacytypes.StorageAsk{
+		Price:         price,
+		VerifiedPrice: verifiedPrice,
+		Timestamp:     ts.Height(),
+		Expiry:        ts.Height() + duration,
+		Miner:         miner,
+		SeqNo:         seqno,
+		MinPieceSize:  minPieceSize,
+		MaxPieceSize:  maxPieceSize,
+	}
+
+	for _, option := range options {
+		option(ask)
+	}
+
+	sig, err := s.sign(ctx, ask)
+	if err != nil {
+		return err
+	}
+
+	s.asks[miner] = &legacytypes.SignedStorageAsk{
+		Ask:       ask,
+		Signature: sig,
+	}
+	return s.storeAsk(ctx, *ask)
+}
+
+func (s *StoredAsk) getSignedAsk(ctx context.Context, miner address.Address) (legacytypes.SignedStorageAsk, error) {
+	ask, err := s.db.Get(ctx, miner)
+	if err != nil {
+		return legacytypes.SignedStorageAsk{}, err
+	}
+	ss, err := s.sign(ctx, &ask)
+	if err != nil {
+		return legacytypes.SignedStorageAsk{}, err
+	}
+
+	return legacytypes.SignedStorageAsk{
+		Ask:       &ask,
+ Signature: ss, + }, nil +} + +func (s *StoredAsk) storeAsk(ctx context.Context, ask legacytypes.StorageAsk) error { + return s.db.Update(ctx, ask) +} diff --git a/storagemarket/types/legacytypes/dealstatus.go b/storagemarket/types/legacytypes/dealstatus.go new file mode 100644 index 000000000..b4fb388fa --- /dev/null +++ b/storagemarket/types/legacytypes/dealstatus.go @@ -0,0 +1,235 @@ +package legacytypes + +import ( + "github.com/filecoin-project/go-statemachine/fsm" +) + +// StorageDealStatus is the local status of a StorageDeal. +// Note: this status has meaning in the context of this module only - it is not +// recorded on chain +type StorageDealStatus = uint64 + +const ( + // StorageDealUnknown means the current status of a deal is undefined + StorageDealUnknown = StorageDealStatus(iota) + + // StorageDealProposalNotFound is a status returned in responses when the deal itself cannot + // be located + StorageDealProposalNotFound + + // StorageDealProposalRejected is returned by a StorageProvider when it chooses not to accept + // a DealProposal + StorageDealProposalRejected + + // StorageDealProposalAccepted indicates an intent to accept a storage deal proposal + StorageDealProposalAccepted + + // StorageDealStaged means a deal has been published and data is ready to be put into a sector + StorageDealStaged + + // StorageDealSealing means a deal is in a sector that is being sealed + StorageDealSealing + + // StorageDealFinalizing means a deal is in a sealed sector and we're doing final + // housekeeping before marking it active + StorageDealFinalizing + + // StorageDealActive means a deal is in a sealed sector and the miner is proving the data + // for the deal + StorageDealActive + + // StorageDealExpired means a deal has passed its final epoch and is expired + StorageDealExpired + + // StorageDealSlashed means the deal was in a sector that got slashed from failing to prove + StorageDealSlashed + + // StorageDealRejecting means the Provider has rejected the deal, and will send a rejection response + StorageDealRejecting + + // StorageDealFailing means something has gone wrong in a deal. 
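
As an illustration of how the storedask package above is meant to be consumed, here is a minimal usage sketch. It assumes a *storedask.StoredAsk has already been constructed by the node's fx graph via NewStoredAsk, that maddr is the provider's miner address, and that the price values are placeholders rather than recommendations.

package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/boost/storagemarket/storedask"
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
)

// updateAndReadAsk bumps the provider's ask and reads back the signed copy.
func updateAndReadAsk(ctx context.Context, sa *storedask.StoredAsk, maddr address.Address) error {
	price := abi.NewTokenAmount(600000000)   // unverified price in attoFIL/GiB/epoch (placeholder)
	verified := abi.NewTokenAmount(60000000) // verified price in attoFIL/GiB/epoch (placeholder)

	// SetAsk signs the new ask with the miner worker key and persists it to the ask DB.
	if err := sa.SetAsk(ctx, price, verified, storedask.DefaultDuration, maddr); err != nil {
		return err
	}

	// GetAsk serves the in-memory signed ask; nil means no ask is stored for this miner.
	signed := sa.GetAsk(maddr)
	if signed == nil {
		return fmt.Errorf("no ask stored for miner %s", maddr)
	}
	fmt.Printf("ask seqno %d valid until epoch %d\n", signed.Ask.SeqNo, signed.Ask.Expiry)
	return nil
}
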
Once data is cleaned up the deal will finalize on + // StorageDealError + StorageDealFailing + + // StorageDealFundsReserved means we've deposited funds as necessary to create a deal, ready to move forward + StorageDealFundsReserved + + // StorageDealCheckForAcceptance means the client is waiting for a provider to seal and publish a deal + StorageDealCheckForAcceptance + + // StorageDealValidating means the provider is validating that deal parameters are good for a proposal + StorageDealValidating + + // StorageDealAcceptWait means the provider is running any custom decision logic to decide whether or not to accept the deal + StorageDealAcceptWait + + // StorageDealStartDataTransfer means data transfer is beginning + StorageDealStartDataTransfer + + // StorageDealTransferring means data is being sent from the client to the provider via the data transfer module + StorageDealTransferring + + // StorageDealWaitingForData indicates either a manual transfer + // or that the provider has not received a data transfer request from the client + StorageDealWaitingForData + + // StorageDealVerifyData means data has been transferred and we are attempting to verify it against the PieceCID + StorageDealVerifyData + + // StorageDealReserveProviderFunds means that provider is making sure it has adequate funds for the deal in the StorageMarketActor + StorageDealReserveProviderFunds + + // StorageDealReserveClientFunds means that client is making sure it has adequate funds for the deal in the StorageMarketActor + StorageDealReserveClientFunds + + // StorageDealProviderFunding means that the provider has deposited funds in the StorageMarketActor and it is waiting + // to see the funds appear in its balance + StorageDealProviderFunding + + // StorageDealClientFunding means that the client has deposited funds in the StorageMarketActor and it is waiting + // to see the funds appear in its balance + StorageDealClientFunding + + // StorageDealPublish means the deal is ready to be published on chain + StorageDealPublish + + // StorageDealPublishing means the deal has been published but we are waiting for it to appear on chain + StorageDealPublishing + + // StorageDealError means the deal has failed due to an error, and no further updates will occur + StorageDealError + + // StorageDealProviderTransferAwaitRestart means the provider has restarted while data + // was being transferred from client to provider, and will wait for the client to + // resume the transfer + StorageDealProviderTransferAwaitRestart + + // StorageDealClientTransferRestart means a storage deal data transfer from client to provider will be restarted + // by the client + StorageDealClientTransferRestart + + // StorageDealAwaitingPreCommit means a deal is ready and must be pre-committed + StorageDealAwaitingPreCommit + + // StorageDealTransferQueued means the data transfer request has been queued and will be executed soon. 
+ StorageDealTransferQueued +) + +// DealStates maps StorageDealStatus codes to string names +var DealStates = map[StorageDealStatus]string{ + StorageDealUnknown: "StorageDealUnknown", + StorageDealProposalNotFound: "StorageDealProposalNotFound", + StorageDealProposalRejected: "StorageDealProposalRejected", + StorageDealProposalAccepted: "StorageDealProposalAccepted", + StorageDealAcceptWait: "StorageDealAcceptWait", + StorageDealStartDataTransfer: "StorageDealStartDataTransfer", + StorageDealStaged: "StorageDealStaged", + StorageDealAwaitingPreCommit: "StorageDealAwaitingPreCommit", + StorageDealSealing: "StorageDealSealing", + StorageDealActive: "StorageDealActive", + StorageDealExpired: "StorageDealExpired", + StorageDealSlashed: "StorageDealSlashed", + StorageDealRejecting: "StorageDealRejecting", + StorageDealFailing: "StorageDealFailing", + StorageDealFundsReserved: "StorageDealFundsReserved", + StorageDealCheckForAcceptance: "StorageDealCheckForAcceptance", + StorageDealValidating: "StorageDealValidating", + StorageDealTransferring: "StorageDealTransferring", + StorageDealWaitingForData: "StorageDealWaitingForData", + StorageDealVerifyData: "StorageDealVerifyData", + StorageDealReserveProviderFunds: "StorageDealReserveProviderFunds", + StorageDealReserveClientFunds: "StorageDealReserveClientFunds", + StorageDealProviderFunding: "StorageDealProviderFunding", + StorageDealClientFunding: "StorageDealClientFunding", + StorageDealPublish: "StorageDealPublish", + StorageDealPublishing: "StorageDealPublishing", + StorageDealError: "StorageDealError", + StorageDealFinalizing: "StorageDealFinalizing", + StorageDealClientTransferRestart: "StorageDealClientTransferRestart", + StorageDealProviderTransferAwaitRestart: "StorageDealProviderTransferAwaitRestart", + StorageDealTransferQueued: "StorageDealTransferQueued", +} + +// DealStatesDescriptions maps StorageDealStatus codes to string description for better UX +var DealStatesDescriptions = map[StorageDealStatus]string{ + StorageDealUnknown: "Unknown", + StorageDealProposalNotFound: "Proposal not found", + StorageDealProposalRejected: "Proposal rejected", + StorageDealProposalAccepted: "Proposal accepted", + StorageDealAcceptWait: "AcceptWait", + StorageDealStartDataTransfer: "Starting data transfer", + StorageDealStaged: "Staged", + StorageDealAwaitingPreCommit: "Awaiting a PreCommit message on chain", + StorageDealSealing: "Sealing", + StorageDealActive: "Active", + StorageDealExpired: "Expired", + StorageDealSlashed: "Slashed", + StorageDealRejecting: "Rejecting", + StorageDealFailing: "Failing", + StorageDealFundsReserved: "FundsReserved", + StorageDealCheckForAcceptance: "Checking for deal acceptance", + StorageDealValidating: "Validating", + StorageDealTransferring: "Transferring", + StorageDealWaitingForData: "Waiting for data", + StorageDealVerifyData: "Verifying data", + StorageDealReserveProviderFunds: "Reserving provider funds", + StorageDealReserveClientFunds: "Reserving client funds", + StorageDealProviderFunding: "Provider funding", + StorageDealClientFunding: "Client funding", + StorageDealPublish: "Publish", + StorageDealPublishing: "Publishing", + StorageDealError: "Error", + StorageDealFinalizing: "Finalizing", + StorageDealClientTransferRestart: "Client transfer restart", + StorageDealProviderTransferAwaitRestart: "ProviderTransferAwaitRestart", +} + +var DealStatesDurations = map[StorageDealStatus]string{ + StorageDealUnknown: "", + StorageDealProposalNotFound: "", + StorageDealProposalRejected: "", + 
StorageDealProposalAccepted: "a few minutes", + StorageDealAcceptWait: "a few minutes", + StorageDealStartDataTransfer: "a few minutes", + StorageDealStaged: "a few minutes", + StorageDealAwaitingPreCommit: "a few minutes", + StorageDealSealing: "a few hours", + StorageDealActive: "", + StorageDealExpired: "", + StorageDealSlashed: "", + StorageDealRejecting: "", + StorageDealFailing: "", + StorageDealFundsReserved: "a few minutes", + StorageDealCheckForAcceptance: "a few minutes", + StorageDealValidating: "a few minutes", + StorageDealTransferring: "a few minutes", + StorageDealWaitingForData: "a few minutes", + StorageDealVerifyData: "a few minutes", + StorageDealReserveProviderFunds: "a few minutes", + StorageDealReserveClientFunds: "a few minutes", + StorageDealProviderFunding: "a few minutes", + StorageDealClientFunding: "a few minutes", + StorageDealPublish: "a few minutes", + StorageDealPublishing: "a few minutes", + StorageDealError: "", + StorageDealFinalizing: "a few minutes", + StorageDealClientTransferRestart: "depending on data size, anywhere between a few minutes to a few hours", + StorageDealProviderTransferAwaitRestart: "a few minutes", +} + +// ProviderFinalityStates are the states that terminate deal processing for a deal. +// When a provider restarts, it restarts only deals that are not in a finality state. +var ProviderFinalityStates = []fsm.StateKey{ + StorageDealError, + StorageDealSlashed, + StorageDealExpired, +} + +// StatesKnownBySealingSubsystem are the states on the happy path after hand-off to +// the sealing subsystem +var StatesKnownBySealingSubsystem = []fsm.StateKey{ + StorageDealAwaitingPreCommit, + StorageDealSealing, + StorageDealFinalizing, + StorageDealActive, +} diff --git a/storagemarket/types/legacytypes/filestore/file.go b/storagemarket/types/legacytypes/filestore/file.go new file mode 100644 index 000000000..119ced0f1 --- /dev/null +++ b/storagemarket/types/legacytypes/filestore/file.go @@ -0,0 +1,39 @@ +package filestore + +import ( + "os" + "path" +) + +type fd struct { + *os.File + filename string + basepath string +} + +func newFile(basepath OsPath, filename Path) (File, error) { + var err error + result := fd{filename: string(filename), basepath: string(basepath)} + full := path.Join(string(basepath), string(filename)) + result.File, err = os.OpenFile(full, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0644) + if err != nil { + return nil, err + } + return &result, nil +} + +func (f fd) Path() Path { + return Path(f.filename) +} + +func (f fd) OsPath() OsPath { + return OsPath(f.Name()) +} + +func (f fd) Size() int64 { + info, err := os.Stat(f.Name()) + if err != nil { + return -1 + } + return info.Size() +} diff --git a/storagemarket/types/legacytypes/filestore/filestore.go b/storagemarket/types/legacytypes/filestore/filestore.go new file mode 100644 index 000000000..a9c802102 --- /dev/null +++ b/storagemarket/types/legacytypes/filestore/filestore.go @@ -0,0 +1,83 @@ +package filestore + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" +) + +type fileStore struct { + base string +} + +// NewLocalFileStore creates a filestore mounted on a given local directory path +func NewLocalFileStore(baseDir OsPath) (FileStore, error) { + base, err := checkIsDir(string(baseDir)) + if err != nil { + return nil, err + } + return &fileStore{base}, nil +} + +func (fs fileStore) filename(p Path) string { + return filepath.Join(fs.base, string(p)) +} + +func (fs fileStore) Open(p Path) (File, error) { + name := fs.filename(p) + if _, err := 
os.Stat(name); err != nil { + return nil, fmt.Errorf("error trying to open %s: %s", name, err.Error()) + } + return newFile(OsPath(fs.base), p) +} + +func (fs fileStore) Create(p Path) (File, error) { + name := fs.filename(p) + if _, err := os.Stat(name); err == nil { + return nil, fmt.Errorf("file %s already exists", name) + } + return newFile(OsPath(fs.base), p) +} + +func (fs fileStore) Store(p Path, src File) (Path, error) { + dest, err := fs.Create(p) + if err != nil { + return Path(""), err + } + + if _, err = io.Copy(dest, src); err != nil { + dest.Close() + return Path(""), err + } + return p, dest.Close() +} + +func (fs fileStore) Delete(p Path) error { + filename := string(p) + full := path.Join(string(fs.base), string(filename)) + return os.Remove(full) +} + +func (fs fileStore) CreateTemp() (File, error) { + f, err := ioutil.TempFile(fs.base, "fstmp") + if err != nil { + return nil, err + } + filename := filepath.Base(f.Name()) + return &fd{File: f, basepath: fs.base, filename: filename}, nil +} + +func checkIsDir(baseDir string) (string, error) { + base := filepath.Clean(string(baseDir)) + info, err := os.Stat(base) + if err != nil { + return "", fmt.Errorf("error getting %s info: %s", base, err.Error()) + } + if !info.IsDir() { + return "", fmt.Errorf("%s is not a directory", base) + } + return base, nil +} diff --git a/storagemarket/types/legacytypes/filestore/types.go b/storagemarket/types/legacytypes/filestore/types.go new file mode 100644 index 000000000..d3e840319 --- /dev/null +++ b/storagemarket/types/legacytypes/filestore/types.go @@ -0,0 +1,38 @@ +package filestore + +import ( + "io" +) + +// Path represents an abstract path to a file +type Path string + +// OsPath represents a path that can be located on +// the operating system with standard os.File operations +type OsPath string + +// File is a wrapper around an os file +type File interface { + Path() Path + OsPath() OsPath + Size() int64 + + io.Closer + io.Reader + io.Writer + io.Seeker +} + +// FileStore is an abstract filestore, used for storing temporary file data +// when handing off a deal to the Storage Mining module. Files are created by +// the storage market module, their path is given to the storage mining module +// when AddPiece is called. 
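
Since the local FileStore implementation above is the hand-off point between the storage market and the sealing subsystem, a short usage sketch may help. It assumes only a writable staging directory; the path used here is purely illustrative.

package example

import (
	"fmt"
	"log"

	"github.com/filecoin-project/boost/storagemarket/types/legacytypes/filestore"
)

func stagePiece() {
	// Any writable directory works; this path is illustrative only.
	fs, err := filestore.NewLocalFileStore(filestore.OsPath("/tmp/boost-staging"))
	if err != nil {
		log.Fatal(err)
	}

	// Stage incoming deal data in a temp file inside the store.
	tmp, err := fs.CreateTemp()
	if err != nil {
		log.Fatal(err)
	}
	if _, err := tmp.Write([]byte("piece bytes")); err != nil {
		log.Fatal(err)
	}
	if err := tmp.Close(); err != nil {
		log.Fatal(err)
	}

	// Re-open by its store-relative Path; OsPath is what a sealing call would receive.
	f, err := fs.Open(tmp.Path())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("staged at", f.OsPath(), "size", f.Size())
	_ = f.Close()

	// Remove the staged file once it is no longer needed.
	if err := fs.Delete(tmp.Path()); err != nil {
		log.Fatal(err)
	}
}
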
The Storage Mining module then reads from them +// from the FileStore, and deletes them once they have been sealed in a sector +type FileStore interface { + Open(p Path) (File, error) + Create(p Path) (File, error) + Store(p Path, f File) (Path, error) + Delete(p Path) error + + CreateTemp() (File, error) +} diff --git a/storagemarket/types/legacytypes/migrations/migrations.go b/storagemarket/types/legacytypes/migrations/migrations.go new file mode 100644 index 000000000..5a9ba6752 --- /dev/null +++ b/storagemarket/types/legacytypes/migrations/migrations.go @@ -0,0 +1,325 @@ +package migrations + +import ( + "fmt" + "unicode/utf8" + + "github.com/filecoin-project/boost/storagemarket/types/legacytypes" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/net/context" + + "github.com/filecoin-project/go-address" + versioning "github.com/filecoin-project/go-ds-versioning/pkg" + "github.com/filecoin-project/go-ds-versioning/pkg/versioned" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/crypto" + marketOld "github.com/filecoin-project/specs-actors/actors/builtin/market" + + "github.com/filecoin-project/boost/storagemarket/types/legacytypes/filestore" +) + +//go:generate cbor-gen-for ClientDeal0 MinerDeal0 Balance0 SignedStorageAsk0 StorageAsk0 DataRef0 ProviderDealState0 AskRequest0 AskResponse0 Proposal0 Response0 SignedResponse0 DealStatusRequest0 DealStatusResponse0 + +// Balance0 is version 0 of Balance +type Balance0 struct { + Locked abi.TokenAmount + Available abi.TokenAmount +} + +// StorageAsk0 is version 0 of StorageAsk +type StorageAsk0 struct { + Price abi.TokenAmount + VerifiedPrice abi.TokenAmount + + MinPieceSize abi.PaddedPieceSize + MaxPieceSize abi.PaddedPieceSize + Miner address.Address + Timestamp abi.ChainEpoch + Expiry abi.ChainEpoch + SeqNo uint64 +} + +// SignedStorageAsk0 is version 0 of SignedStorageAsk +type SignedStorageAsk0 struct { + Ask *StorageAsk0 + Signature *crypto.Signature +} + +// MinerDeal0 is version 0 of MinerDeal +type MinerDeal0 struct { + marketOld.ClientDealProposal + ProposalCid cid.Cid + AddFundsCid *cid.Cid + PublishCid *cid.Cid + Miner peer.ID + Client peer.ID + State legacytypes.StorageDealStatus + PiecePath filestore.Path + MetadataPath filestore.Path + SlashEpoch abi.ChainEpoch + FastRetrieval bool + Message string + StoreID *uint64 + FundsReserved abi.TokenAmount + Ref *DataRef0 + AvailableForRetrieval bool + + DealID abi.DealID + CreationTime cbg.CborTime +} + +// ClientDeal0 is version 0 of ClientDeal +type ClientDeal0 struct { + market.ClientDealProposal + ProposalCid cid.Cid + AddFundsCid *cid.Cid + State legacytypes.StorageDealStatus + Miner peer.ID + MinerWorker address.Address + DealID abi.DealID + DataRef *DataRef0 + Message string + PublishMessage *cid.Cid + SlashEpoch abi.ChainEpoch + PollRetryCount uint64 + PollErrorCount uint64 + FastRetrieval bool + StoreID *uint64 + FundsReserved abi.TokenAmount + CreationTime cbg.CborTime +} + +// DataRef0 is version 0 of DataRef +type DataRef0 struct { + TransferType string + Root cid.Cid + PieceCid *cid.Cid + PieceSize abi.UnpaddedPieceSize +} + +// ProviderDealState0 is version 0 of ProviderDealState +type ProviderDealState0 struct { + State legacytypes.StorageDealStatus + Message string + Proposal *market.DealProposal + ProposalCid *cid.Cid + AddFundsCid *cid.Cid + PublishCid *cid.Cid + DealID abi.DealID + 
FastRetrieval bool +} + +// Proposal0 is version 0 of Proposal +type Proposal0 struct { + DealProposal *market.ClientDealProposal + Piece *DataRef0 + FastRetrieval bool +} + +// Response0 is version 0 of Response +type Response0 struct { + State legacytypes.StorageDealStatus + + // DealProposalRejected + Message string + Proposal cid.Cid + + // StorageDealProposalAccepted + PublishMessage *cid.Cid +} + +// SignedResponse0 is version 0 of SignedResponse +type SignedResponse0 struct { + Response Response0 + Signature *crypto.Signature +} + +// AskRequest0 is version 0 of AskRequest +type AskRequest0 struct { + Miner address.Address +} + +// AskResponse0 is version 0 of AskResponse +type AskResponse0 struct { + Ask *SignedStorageAsk0 +} + +// DealStatusRequest0 is version 0 of DealStatusRequest +type DealStatusRequest0 struct { + Proposal cid.Cid + Signature crypto.Signature +} + +// DealStatusResponse0 is version 0 of DealStatusResponse +type DealStatusResponse0 struct { + DealState ProviderDealState0 + Signature crypto.Signature +} + +// MigrateDataRef0To1 migrates a tuple encoded data tref to a map encoded data ref +func MigrateDataRef0To1(oldDr *DataRef0) *legacytypes.DataRef { + if oldDr == nil { + return nil + } + return &legacytypes.DataRef{ + TransferType: oldDr.TransferType, + Root: oldDr.Root, + PieceCid: oldDr.PieceCid, + PieceSize: oldDr.PieceSize, + } +} + +// MigrateClientDeal0To1 migrates a tuple encoded client deal to a map encoded client deal +func MigrateClientDeal0To1(oldCd *ClientDeal0) (*legacytypes.ClientDeal, error) { + return &legacytypes.ClientDeal{ + ClientDealProposal: oldCd.ClientDealProposal, + ProposalCid: oldCd.ProposalCid, + AddFundsCid: oldCd.AddFundsCid, + State: oldCd.State, + Miner: oldCd.Miner, + MinerWorker: oldCd.MinerWorker, + DealID: oldCd.DealID, + DataRef: MigrateDataRef0To1(oldCd.DataRef), + Message: oldCd.Message, + PublishMessage: oldCd.PublishMessage, + SlashEpoch: oldCd.SlashEpoch, + PollRetryCount: oldCd.PollRetryCount, + PollErrorCount: oldCd.PollErrorCount, + FastRetrieval: oldCd.FastRetrieval, + FundsReserved: oldCd.FundsReserved, + CreationTime: oldCd.CreationTime, + }, nil +} + +// MigrateMinerDeal0To1 migrates a tuple encoded miner deal to a map encoded miner deal +func MigrateMinerDeal0To1(oldCd *MinerDeal0) (*MinerDeal1, error) { + return &MinerDeal1{ + ClientDealProposal: oldCd.ClientDealProposal, + ProposalCid: oldCd.ProposalCid, + AddFundsCid: oldCd.AddFundsCid, + PublishCid: oldCd.PublishCid, + Miner: oldCd.Miner, + Client: oldCd.Client, + State: oldCd.State, + PiecePath: oldCd.PiecePath, + MetadataPath: oldCd.MetadataPath, + SlashEpoch: oldCd.SlashEpoch, + FastRetrieval: oldCd.FastRetrieval, + Message: oldCd.Message, + FundsReserved: oldCd.FundsReserved, + Ref: MigrateDataRef0To1(oldCd.Ref), + AvailableForRetrieval: oldCd.AvailableForRetrieval, + DealID: oldCd.DealID, + CreationTime: oldCd.CreationTime, + }, nil +} + +// MigrateMinerDeal1To2 migrates a miner deal label to the new format +func MigrateMinerDeal1To2(oldCd *MinerDeal1) (*legacytypes.MinerDeal, error) { + clientDealProp, err := MigrateClientDealProposal0To1(oldCd.ClientDealProposal) + if err != nil { + return nil, fmt.Errorf("migrating deal with proposal cid %s: %w", oldCd.ProposalCid, err) + } + + return &legacytypes.MinerDeal{ + ClientDealProposal: *clientDealProp, + ProposalCid: oldCd.ProposalCid, + AddFundsCid: oldCd.AddFundsCid, + PublishCid: oldCd.PublishCid, + Miner: oldCd.Miner, + Client: oldCd.Client, + State: oldCd.State, + PiecePath: oldCd.PiecePath, + 
MetadataPath: oldCd.MetadataPath, + SlashEpoch: oldCd.SlashEpoch, + FastRetrieval: oldCd.FastRetrieval, + Message: oldCd.Message, + FundsReserved: oldCd.FundsReserved, + Ref: oldCd.Ref, + AvailableForRetrieval: oldCd.AvailableForRetrieval, + DealID: oldCd.DealID, + CreationTime: oldCd.CreationTime, + }, nil +} + +func MigrateClientDealProposal0To1(prop marketOld.ClientDealProposal) (*legacytypes.ClientDealProposal, error) { + oldLabel := prop.Proposal.Label + + var err error + var newLabel market.DealLabel + if utf8.ValidString(oldLabel) { + newLabel, err = market.NewLabelFromString(oldLabel) + if err != nil { + return nil, fmt.Errorf("migrating deal label to DealLabel (string): %w", err) + } + } else { + newLabel, err = market.NewLabelFromBytes([]byte(oldLabel)) + if err != nil { + return nil, fmt.Errorf("migrating deal label to DealLabel (byte): %w", err) + } + } + + return &legacytypes.ClientDealProposal{ + ClientSignature: prop.ClientSignature, + Proposal: market.DealProposal{ + PieceCID: prop.Proposal.PieceCID, + PieceSize: prop.Proposal.PieceSize, + VerifiedDeal: prop.Proposal.VerifiedDeal, + Client: prop.Proposal.Client, + Provider: prop.Proposal.Provider, + Label: newLabel, + StartEpoch: prop.Proposal.StartEpoch, + EndEpoch: prop.Proposal.EndEpoch, + StoragePricePerEpoch: prop.Proposal.StoragePricePerEpoch, + ProviderCollateral: prop.Proposal.ProviderCollateral, + ClientCollateral: prop.Proposal.ClientCollateral, + }, + }, nil +} + +// MigrateStorageAsk0To1 migrates a tuple encoded storage ask to a map encoded storage ask +func MigrateStorageAsk0To1(oldSa *StorageAsk0) *legacytypes.StorageAsk { + return &legacytypes.StorageAsk{ + Price: oldSa.Price, + VerifiedPrice: oldSa.VerifiedPrice, + + MinPieceSize: oldSa.MinPieceSize, + MaxPieceSize: oldSa.MaxPieceSize, + Miner: oldSa.Miner, + Timestamp: oldSa.Timestamp, + Expiry: oldSa.Expiry, + SeqNo: oldSa.SeqNo, + } +} + +// GetMigrateSignedStorageAsk0To1 returns a function that migrates a tuple encoded signed storage ask to a map encoded signed storage ask +// It needs a signing function to resign the ask -- there's no way around that +func GetMigrateSignedStorageAsk0To1(sign func(ctx context.Context, ask *legacytypes.StorageAsk) (*crypto.Signature, error)) func(*SignedStorageAsk0) (*legacytypes.SignedStorageAsk, error) { + return func(oldSsa *SignedStorageAsk0) (*legacytypes.SignedStorageAsk, error) { + newSa := MigrateStorageAsk0To1(oldSsa.Ask) + sig, err := sign(context.TODO(), newSa) + if err != nil { + return nil, err + } + return &legacytypes.SignedStorageAsk{ + Ask: newSa, + Signature: sig, + }, nil + } +} + +// ClientMigrations are migrations for the client's store of storage deals +var ClientMigrations = versioned.BuilderList{ + versioned.NewVersionedBuilder(MigrateClientDeal0To1, versioning.VersionKey("1")), +} + +// ProviderMigrations are migrations for the providers's store of storage deals +var ProviderMigrations = versioned.BuilderList{ + versioned.NewVersionedBuilder(MigrateMinerDeal0To1, versioning.VersionKey("1")).FilterKeys([]string{ + "/latest-ask", "/storage-ask/latest", "/storage-ask/1/latest", "/storage-ask/versions/current"}), + versioned.NewVersionedBuilder(MigrateMinerDeal1To2, versioning.VersionKey("2")).FilterKeys([]string{ + "/latest-ask", "/storage-ask/latest", "/storage-ask/1/latest", "/storage-ask/versions/current"}).OldVersion("1"), +} diff --git a/storagemarket/types/legacytypes/migrations/migrations_cbor_gen.go b/storagemarket/types/legacytypes/migrations/migrations_cbor_gen.go new file mode 100644 index 
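
To make the intended use of the two-step miner deal migration above concrete, here is a minimal sketch. The old record is assumed to have been decoded from the legacy provider datastore elsewhere; in practice these functions are normally wired up through the go-ds-versioning builders (ClientMigrations/ProviderMigrations) rather than called by hand.

package example

import (
	"github.com/filecoin-project/boost/storagemarket/types/legacytypes"
	"github.com/filecoin-project/boost/storagemarket/types/legacytypes/migrations"
)

// upgradeMinerDeal converts a v0 tuple-encoded miner deal (string deal label)
// into the current map-encoded form with a typed market.DealLabel.
func upgradeMinerDeal(old *migrations.MinerDeal0) (*legacytypes.MinerDeal, error) {
	// Step 1: tuple encoding -> map encoding (MinerDeal0 -> MinerDeal1).
	d1, err := migrations.MigrateMinerDeal0To1(old)
	if err != nil {
		return nil, err
	}
	// Step 2: re-encode the string deal label as a market.DealLabel (MinerDeal1 -> legacytypes.MinerDeal).
	return migrations.MigrateMinerDeal1To2(d1)
}
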
000000000..6610cfec8 --- /dev/null +++ b/storagemarket/types/legacytypes/migrations/migrations_cbor_gen.go @@ -0,0 +1,2271 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + +package migrations + +import ( + "fmt" + "io" + "math" + "sort" + + filestore "github.com/filecoin-project/boost/storagemarket/types/legacytypes/filestore" + abi "github.com/filecoin-project/go-state-types/abi" + market "github.com/filecoin-project/go-state-types/builtin/v9/market" + crypto "github.com/filecoin-project/go-state-types/crypto" + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p/core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +var lengthBufClientDeal0 = []byte{145} + +func (t *ClientDeal0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufClientDeal0); err != nil { + return err + } + + // t.ClientDealProposal (market.ClientDealProposal) (struct) + if err := t.ClientDealProposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.ProposalCid (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.ProposalCid); err != nil { + return xerrors.Errorf("failed to write cid field t.ProposalCid: %w", err) + } + + // t.AddFundsCid (cid.Cid) (struct) + + if t.AddFundsCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.AddFundsCid); err != nil { + return xerrors.Errorf("failed to write cid field t.AddFundsCid: %w", err) + } + } + + // t.State (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.State)); err != nil { + return err + } + + // t.Miner (peer.ID) (string) + if len(t.Miner) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Miner was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Miner))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Miner)); err != nil { + return err + } + + // t.MinerWorker (address.Address) (struct) + if err := t.MinerWorker.MarshalCBOR(cw); err != nil { + return err + } + + // t.DealID (abi.DealID) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.DataRef (migrations.DataRef0) (struct) + if err := t.DataRef.MarshalCBOR(cw); err != nil { + return err + } + + // t.Message (string) (string) + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Message)); err != nil { + return err + } + + // t.PublishMessage (cid.Cid) (struct) + + if t.PublishMessage == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PublishMessage); err != nil { + return xerrors.Errorf("failed to write cid field t.PublishMessage: %w", err) + } + } + + // t.SlashEpoch (abi.ChainEpoch) (int64) + if t.SlashEpoch >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SlashEpoch)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.SlashEpoch-1)); err != nil { + return err + } + } + + // t.PollRetryCount (uint64) (uint64) + + if err := 
cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PollRetryCount)); err != nil { + return err + } + + // t.PollErrorCount (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PollErrorCount)); err != nil { + return err + } + + // t.FastRetrieval (bool) (bool) + if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { + return err + } + + // t.StoreID (uint64) (uint64) + + if t.StoreID == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(*t.StoreID)); err != nil { + return err + } + } + + // t.FundsReserved (big.Int) (struct) + if err := t.FundsReserved.MarshalCBOR(cw); err != nil { + return err + } + + // t.CreationTime (typegen.CborTime) (struct) + if err := t.CreationTime.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *ClientDeal0) UnmarshalCBOR(r io.Reader) (err error) { + *t = ClientDeal0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 17 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.ClientDealProposal (market.ClientDealProposal) (struct) + + { + + if err := t.ClientDealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ClientDealProposal: %w", err) + } + + } + // t.ProposalCid (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ProposalCid: %w", err) + } + + t.ProposalCid = c + + } + // t.AddFundsCid (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.AddFundsCid: %w", err) + } + + t.AddFundsCid = &c + } + + } + // t.State (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.State = uint64(extra) + + } + // t.Miner (peer.ID) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Miner = peer.ID(sval) + } + // t.MinerWorker (address.Address) (struct) + + { + + if err := t.MinerWorker.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.MinerWorker: %w", err) + } + + } + // t.DealID (abi.DealID) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.DataRef (migrations.DataRef0) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.DataRef = new(DataRef0) + if err := t.DataRef.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DataRef pointer: %w", err) + } + } + + } + // t.Message (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.PublishMessage (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := 
cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishMessage: %w", err) + } + + t.PublishMessage = &c + } + + } + // t.SlashEpoch (abi.ChainEpoch) (int64) + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative overflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SlashEpoch = abi.ChainEpoch(extraI) + } + // t.PollRetryCount (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PollRetryCount = uint64(extra) + + } + // t.PollErrorCount (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PollErrorCount = uint64(extra) + + } + // t.FastRetrieval (bool) (bool) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.FastRetrieval = false + case 21: + t.FastRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.StoreID (uint64) (uint64) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + typed := uint64(extra) + t.StoreID = &typed + } + + } + // t.FundsReserved (big.Int) (struct) + + { + + if err := t.FundsReserved.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.FundsReserved: %w", err) + } + + } + // t.CreationTime (typegen.CborTime) (struct) + + { + + if err := t.CreationTime.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.CreationTime: %w", err) + } + + } + return nil +} + +var lengthBufMinerDeal0 = []byte{146} + +func (t *MinerDeal0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufMinerDeal0); err != nil { + return err + } + + // t.ClientDealProposal (market.ClientDealProposal) (struct) + if err := t.ClientDealProposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.ProposalCid (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.ProposalCid); err != nil { + return xerrors.Errorf("failed to write cid field t.ProposalCid: %w", err) + } + + // t.AddFundsCid (cid.Cid) (struct) + + if t.AddFundsCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.AddFundsCid); err != nil { + return xerrors.Errorf("failed to write cid field t.AddFundsCid: %w", err) + } + } + + // t.PublishCid (cid.Cid) (struct) + + if t.PublishCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PublishCid); err != nil { + return xerrors.Errorf("failed to 
write cid field t.PublishCid: %w", err) + } + } + + // t.Miner (peer.ID) (string) + if len(t.Miner) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Miner was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Miner))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Miner)); err != nil { + return err + } + + // t.Client (peer.ID) (string) + if len(t.Client) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Client was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Client))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Client)); err != nil { + return err + } + + // t.State (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.State)); err != nil { + return err + } + + // t.PiecePath (filestore.Path) (string) + if len(t.PiecePath) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.PiecePath was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.PiecePath))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.PiecePath)); err != nil { + return err + } + + // t.MetadataPath (filestore.Path) (string) + if len(t.MetadataPath) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.MetadataPath was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.MetadataPath))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.MetadataPath)); err != nil { + return err + } + + // t.SlashEpoch (abi.ChainEpoch) (int64) + if t.SlashEpoch >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SlashEpoch)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.SlashEpoch-1)); err != nil { + return err + } + } + + // t.FastRetrieval (bool) (bool) + if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { + return err + } + + // t.Message (string) (string) + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Message)); err != nil { + return err + } + + // t.StoreID (uint64) (uint64) + + if t.StoreID == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(*t.StoreID)); err != nil { + return err + } + } + + // t.FundsReserved (big.Int) (struct) + if err := t.FundsReserved.MarshalCBOR(cw); err != nil { + return err + } + + // t.Ref (migrations.DataRef0) (struct) + if err := t.Ref.MarshalCBOR(cw); err != nil { + return err + } + + // t.AvailableForRetrieval (bool) (bool) + if err := cbg.WriteBool(w, t.AvailableForRetrieval); err != nil { + return err + } + + // t.DealID (abi.DealID) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.CreationTime (typegen.CborTime) (struct) + if err := t.CreationTime.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *MinerDeal0) UnmarshalCBOR(r io.Reader) (err error) { + *t = MinerDeal0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type 
array") + } + + if extra != 18 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.ClientDealProposal (market.ClientDealProposal) (struct) + + { + + if err := t.ClientDealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ClientDealProposal: %w", err) + } + + } + // t.ProposalCid (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ProposalCid: %w", err) + } + + t.ProposalCid = c + + } + // t.AddFundsCid (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.AddFundsCid: %w", err) + } + + t.AddFundsCid = &c + } + + } + // t.PublishCid (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) + } + + t.PublishCid = &c + } + + } + // t.Miner (peer.ID) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Miner = peer.ID(sval) + } + // t.Client (peer.ID) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Client = peer.ID(sval) + } + // t.State (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.State = uint64(extra) + + } + // t.PiecePath (filestore.Path) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.PiecePath = filestore.Path(sval) + } + // t.MetadataPath (filestore.Path) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.MetadataPath = filestore.Path(sval) + } + // t.SlashEpoch (abi.ChainEpoch) (int64) + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative overflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SlashEpoch = abi.ChainEpoch(extraI) + } + // t.FastRetrieval (bool) (bool) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.FastRetrieval = false + case 21: + t.FastRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.Message (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.StoreID (uint64) (uint64) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + typed := uint64(extra) + t.StoreID = &typed + } + + } + // t.FundsReserved (big.Int) 
(struct) + + { + + if err := t.FundsReserved.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.FundsReserved: %w", err) + } + + } + // t.Ref (migrations.DataRef0) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Ref = new(DataRef0) + if err := t.Ref.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Ref pointer: %w", err) + } + } + + } + // t.AvailableForRetrieval (bool) (bool) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.AvailableForRetrieval = false + case 21: + t.AvailableForRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.DealID (abi.DealID) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.CreationTime (typegen.CborTime) (struct) + + { + + if err := t.CreationTime.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.CreationTime: %w", err) + } + + } + return nil +} + +var lengthBufBalance0 = []byte{130} + +func (t *Balance0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufBalance0); err != nil { + return err + } + + // t.Locked (big.Int) (struct) + if err := t.Locked.MarshalCBOR(cw); err != nil { + return err + } + + // t.Available (big.Int) (struct) + if err := t.Available.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *Balance0) UnmarshalCBOR(r io.Reader) (err error) { + *t = Balance0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Locked (big.Int) (struct) + + { + + if err := t.Locked.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Locked: %w", err) + } + + } + // t.Available (big.Int) (struct) + + { + + if err := t.Available.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Available: %w", err) + } + + } + return nil +} + +var lengthBufSignedStorageAsk0 = []byte{130} + +func (t *SignedStorageAsk0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufSignedStorageAsk0); err != nil { + return err + } + + // t.Ask (migrations.StorageAsk0) (struct) + if err := t.Ask.MarshalCBOR(cw); err != nil { + return err + } + + // t.Signature (crypto.Signature) (struct) + if err := t.Signature.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *SignedStorageAsk0) UnmarshalCBOR(r io.Reader) (err error) { + *t = SignedStorageAsk0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + 
+ if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Ask (migrations.StorageAsk0) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Ask = new(StorageAsk0) + if err := t.Ask.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Ask pointer: %w", err) + } + } + + } + // t.Signature (crypto.Signature) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Signature = new(crypto.Signature) + if err := t.Signature.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Signature pointer: %w", err) + } + } + + } + return nil +} + +var lengthBufStorageAsk0 = []byte{136} + +func (t *StorageAsk0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufStorageAsk0); err != nil { + return err + } + + // t.Price (big.Int) (struct) + if err := t.Price.MarshalCBOR(cw); err != nil { + return err + } + + // t.VerifiedPrice (big.Int) (struct) + if err := t.VerifiedPrice.MarshalCBOR(cw); err != nil { + return err + } + + // t.MinPieceSize (abi.PaddedPieceSize) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.MinPieceSize)); err != nil { + return err + } + + // t.MaxPieceSize (abi.PaddedPieceSize) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.MaxPieceSize)); err != nil { + return err + } + + // t.Miner (address.Address) (struct) + if err := t.Miner.MarshalCBOR(cw); err != nil { + return err + } + + // t.Timestamp (abi.ChainEpoch) (int64) + if t.Timestamp >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Timestamp)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Timestamp-1)); err != nil { + return err + } + } + + // t.Expiry (abi.ChainEpoch) (int64) + if t.Expiry >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Expiry)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Expiry-1)); err != nil { + return err + } + } + + // t.SeqNo (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SeqNo)); err != nil { + return err + } + + return nil +} + +func (t *StorageAsk0) UnmarshalCBOR(r io.Reader) (err error) { + *t = StorageAsk0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 8 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Price (big.Int) (struct) + + { + + if err := t.Price.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Price: %w", err) + } + + } + // t.VerifiedPrice (big.Int) (struct) + + { + + if err := t.VerifiedPrice.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.VerifiedPrice: %w", err) + } + + } + // t.MinPieceSize (abi.PaddedPieceSize) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.MinPieceSize = 
abi.PaddedPieceSize(extra) + + } + // t.MaxPieceSize (abi.PaddedPieceSize) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.MaxPieceSize = abi.PaddedPieceSize(extra) + + } + // t.Miner (address.Address) (struct) + + { + + if err := t.Miner.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Miner: %w", err) + } + + } + // t.Timestamp (abi.ChainEpoch) (int64) + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative overflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Timestamp = abi.ChainEpoch(extraI) + } + // t.Expiry (abi.ChainEpoch) (int64) + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative overflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Expiry = abi.ChainEpoch(extraI) + } + // t.SeqNo (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SeqNo = uint64(extra) + + } + return nil +} + +var lengthBufDataRef0 = []byte{132} + +func (t *DataRef0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufDataRef0); err != nil { + return err + } + + // t.TransferType (string) (string) + if len(t.TransferType) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.TransferType was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.TransferType))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.TransferType)); err != nil { + return err + } + + // t.Root (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.Root); err != nil { + return xerrors.Errorf("failed to write cid field t.Root: %w", err) + } + + // t.PieceCid (cid.Cid) (struct) + + if t.PieceCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PieceCid); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCid: %w", err) + } + } + + // t.PieceSize (abi.UnpaddedPieceSize) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PieceSize)); err != nil { + return err + } + + return nil +} + +func (t *DataRef0) UnmarshalCBOR(r io.Reader) (err error) { + *t = DataRef0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 4 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.TransferType (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != 
nil { + return err + } + + t.TransferType = string(sval) + } + // t.Root (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Root: %w", err) + } + + t.Root = c + + } + // t.PieceCid (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCid: %w", err) + } + + t.PieceCid = &c + } + + } + // t.PieceSize (abi.UnpaddedPieceSize) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PieceSize = abi.UnpaddedPieceSize(extra) + + } + return nil +} + +var lengthBufProviderDealState0 = []byte{136} + +func (t *ProviderDealState0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufProviderDealState0); err != nil { + return err + } + + // t.State (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.State)); err != nil { + return err + } + + // t.Message (string) (string) + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Message)); err != nil { + return err + } + + // t.Proposal (market.DealProposal) (struct) + if err := t.Proposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.ProposalCid (cid.Cid) (struct) + + if t.ProposalCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.ProposalCid); err != nil { + return xerrors.Errorf("failed to write cid field t.ProposalCid: %w", err) + } + } + + // t.AddFundsCid (cid.Cid) (struct) + + if t.AddFundsCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.AddFundsCid); err != nil { + return xerrors.Errorf("failed to write cid field t.AddFundsCid: %w", err) + } + } + + // t.PublishCid (cid.Cid) (struct) + + if t.PublishCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PublishCid); err != nil { + return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err) + } + } + + // t.DealID (abi.DealID) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.FastRetrieval (bool) (bool) + if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { + return err + } + return nil +} + +func (t *ProviderDealState0) UnmarshalCBOR(r io.Reader) (err error) { + *t = ProviderDealState0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 8 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.State (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 
field") + } + t.State = uint64(extra) + + } + // t.Message (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.Proposal (market.DealProposal) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Proposal = new(market.DealProposal) + if err := t.Proposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Proposal pointer: %w", err) + } + } + + } + // t.ProposalCid (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ProposalCid: %w", err) + } + + t.ProposalCid = &c + } + + } + // t.AddFundsCid (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.AddFundsCid: %w", err) + } + + t.AddFundsCid = &c + } + + } + // t.PublishCid (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) + } + + t.PublishCid = &c + } + + } + // t.DealID (abi.DealID) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.FastRetrieval (bool) (bool) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.FastRetrieval = false + case 21: + t.FastRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + return nil +} + +var lengthBufAskRequest0 = []byte{129} + +func (t *AskRequest0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufAskRequest0); err != nil { + return err + } + + // t.Miner (address.Address) (struct) + if err := t.Miner.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *AskRequest0) UnmarshalCBOR(r io.Reader) (err error) { + *t = AskRequest0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Miner (address.Address) (struct) + + { + + if err := t.Miner.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Miner: %w", err) + } + + } + return nil +} + +var lengthBufAskResponse0 = []byte{129} + +func (t *AskResponse0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufAskResponse0); err != 
nil { + return err + } + + // t.Ask (migrations.SignedStorageAsk0) (struct) + if err := t.Ask.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *AskResponse0) UnmarshalCBOR(r io.Reader) (err error) { + *t = AskResponse0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Ask (migrations.SignedStorageAsk0) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Ask = new(SignedStorageAsk0) + if err := t.Ask.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Ask pointer: %w", err) + } + } + + } + return nil +} + +var lengthBufProposal0 = []byte{131} + +func (t *Proposal0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufProposal0); err != nil { + return err + } + + // t.DealProposal (market.ClientDealProposal) (struct) + if err := t.DealProposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.Piece (migrations.DataRef0) (struct) + if err := t.Piece.MarshalCBOR(cw); err != nil { + return err + } + + // t.FastRetrieval (bool) (bool) + if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { + return err + } + return nil +} + +func (t *Proposal0) UnmarshalCBOR(r io.Reader) (err error) { + *t = Proposal0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.DealProposal (market.ClientDealProposal) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.DealProposal = new(market.ClientDealProposal) + if err := t.DealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealProposal pointer: %w", err) + } + } + + } + // t.Piece (migrations.DataRef0) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Piece = new(DataRef0) + if err := t.Piece.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Piece pointer: %w", err) + } + } + + } + // t.FastRetrieval (bool) (bool) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.FastRetrieval = false + case 21: + t.FastRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + return nil +} + +var lengthBufResponse0 = []byte{132} + +func (t *Response0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufResponse0); err != nil { + return err + } + + // t.State (uint64) (uint64) + + if 
err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.State)); err != nil { + return err + } + + // t.Message (string) (string) + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Message)); err != nil { + return err + } + + // t.Proposal (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.Proposal); err != nil { + return xerrors.Errorf("failed to write cid field t.Proposal: %w", err) + } + + // t.PublishMessage (cid.Cid) (struct) + + if t.PublishMessage == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PublishMessage); err != nil { + return xerrors.Errorf("failed to write cid field t.PublishMessage: %w", err) + } + } + + return nil +} + +func (t *Response0) UnmarshalCBOR(r io.Reader) (err error) { + *t = Response0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 4 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.State (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.State = uint64(extra) + + } + // t.Message (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.Proposal (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Proposal: %w", err) + } + + t.Proposal = c + + } + // t.PublishMessage (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishMessage: %w", err) + } + + t.PublishMessage = &c + } + + } + return nil +} + +var lengthBufSignedResponse0 = []byte{130} + +func (t *SignedResponse0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufSignedResponse0); err != nil { + return err + } + + // t.Response (migrations.Response0) (struct) + if err := t.Response.MarshalCBOR(cw); err != nil { + return err + } + + // t.Signature (crypto.Signature) (struct) + if err := t.Signature.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *SignedResponse0) UnmarshalCBOR(r io.Reader) (err error) { + *t = SignedResponse0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Response (migrations.Response0) (struct) + + { + + if err := t.Response.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Response: %w", err) + } + + } + // t.Signature (crypto.Signature) (struct) + + { + + b, err := 
cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Signature = new(crypto.Signature) + if err := t.Signature.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Signature pointer: %w", err) + } + } + + } + return nil +} + +var lengthBufDealStatusRequest0 = []byte{130} + +func (t *DealStatusRequest0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufDealStatusRequest0); err != nil { + return err + } + + // t.Proposal (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.Proposal); err != nil { + return xerrors.Errorf("failed to write cid field t.Proposal: %w", err) + } + + // t.Signature (crypto.Signature) (struct) + if err := t.Signature.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *DealStatusRequest0) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealStatusRequest0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Proposal (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Proposal: %w", err) + } + + t.Proposal = c + + } + // t.Signature (crypto.Signature) (struct) + + { + + if err := t.Signature.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Signature: %w", err) + } + + } + return nil +} + +var lengthBufDealStatusResponse0 = []byte{130} + +func (t *DealStatusResponse0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufDealStatusResponse0); err != nil { + return err + } + + // t.DealState (migrations.ProviderDealState0) (struct) + if err := t.DealState.MarshalCBOR(cw); err != nil { + return err + } + + // t.Signature (crypto.Signature) (struct) + if err := t.Signature.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *DealStatusResponse0) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealStatusResponse0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.DealState (migrations.ProviderDealState0) (struct) + + { + + if err := t.DealState.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealState: %w", err) + } + + } + // t.Signature (crypto.Signature) (struct) + + { + + if err := t.Signature.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Signature: %w", err) + } + + } + return nil +} diff --git a/storagemarket/types/legacytypes/migrations/migrations_mapenc_types.go b/storagemarket/types/legacytypes/migrations/migrations_mapenc_types.go new file mode 100644 index 000000000..84bc456be --- /dev/null +++ b/storagemarket/types/legacytypes/migrations/migrations_mapenc_types.go @@ -0,0 +1,55 @@ +package migrations + +import ( + 
"github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/storagemarket/types/legacytypes" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-state-types/abi" + marketOld "github.com/filecoin-project/specs-actors/actors/builtin/market" + + "github.com/filecoin-project/boost/storagemarket/types/legacytypes/filestore" +) + +// Some of the types in the migrations file are CBOR array-encoded, and some +// are map-encoded. The --map-encoding parameter must be specified in a +// generate directive in a separate file. So we define CBOR map-encoded types +// in this file + +//go:generate cbor-gen-for --map-encoding Proposal1 MinerDeal1 + +// Proposal1 is version 1 of Proposal (used by deal proposal protocol v1.1.0) +type Proposal1 struct { + DealProposal *marketOld.ClientDealProposal + Piece *legacytypes.DataRef + FastRetrieval bool +} + +// MinerDeal1 is version 1 of MinerDeal +type MinerDeal1 struct { + marketOld.ClientDealProposal + ProposalCid cid.Cid + AddFundsCid *cid.Cid + PublishCid *cid.Cid + Miner peer.ID + Client peer.ID + State legacytypes.StorageDealStatus + PiecePath filestore.Path + MetadataPath filestore.Path + SlashEpoch abi.ChainEpoch + FastRetrieval bool + Message string + FundsReserved abi.TokenAmount + Ref *legacytypes.DataRef + AvailableForRetrieval bool + + DealID abi.DealID + CreationTime cbg.CborTime + + TransferChannelId *datatransfer.ChannelID + SectorNumber abi.SectorNumber + + InboundCAR string +} diff --git a/storagemarket/types/legacytypes/migrations/migrations_mapenc_types_cbor_gen.go b/storagemarket/types/legacytypes/migrations/migrations_mapenc_types_cbor_gen.go new file mode 100644 index 000000000..0b1cbf2a8 --- /dev/null +++ b/storagemarket/types/legacytypes/migrations/migrations_mapenc_types_cbor_gen.go @@ -0,0 +1,936 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package migrations + +import ( + "fmt" + "io" + "math" + "sort" + + "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/storagemarket/types/legacytypes" + "github.com/filecoin-project/boost/storagemarket/types/legacytypes/filestore" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-actors/actors/builtin/market" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +func (t *Proposal1) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{163}); err != nil { + return err + } + + // t.Piece (legacytypes.DataRef) (struct) + if len("Piece") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Piece\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Piece"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Piece")); err != nil { + return err + } + + if err := t.Piece.MarshalCBOR(cw); err != nil { + return err + } + + // t.DealProposal (market.ClientDealProposal) (struct) + if len("DealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealProposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealProposal"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealProposal")); err != nil { + return err + } + + if err := t.DealProposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.FastRetrieval (bool) (bool) + if len("FastRetrieval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FastRetrieval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FastRetrieval"))); err != nil { + return err + } + if _, err := cw.WriteString(string("FastRetrieval")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { + return err + } + return nil +} + +func (t *Proposal1) UnmarshalCBOR(r io.Reader) (err error) { + *t = Proposal1{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("Proposal1: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Piece (legacytypes.DataRef) (struct) + case "Piece": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Piece = new(legacytypes.DataRef) + if err := t.Piece.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Piece pointer: %w", err) + } + } + + } + // t.DealProposal (market.ClientDealProposal) (struct) + case "DealProposal": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.DealProposal = new(market.ClientDealProposal) + if err := t.DealProposal.UnmarshalCBOR(cr); err != nil { + return 
xerrors.Errorf("unmarshaling t.DealProposal pointer: %w", err) + } + } + + } + // t.FastRetrieval (bool) (bool) + case "FastRetrieval": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.FastRetrieval = false + case 21: + t.FastRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *MinerDeal1) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{180}); err != nil { + return err + } + + // t.Ref (legacytypes.DataRef) (struct) + if len("Ref") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Ref\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Ref"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Ref")); err != nil { + return err + } + + if err := t.Ref.MarshalCBOR(cw); err != nil { + return err + } + + // t.Miner (peer.ID) (string) + if len("Miner") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Miner\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Miner"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Miner")); err != nil { + return err + } + + if len(t.Miner) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Miner was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Miner))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Miner)); err != nil { + return err + } + + // t.State (uint64) (uint64) + if len("State") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"State\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("State"))); err != nil { + return err + } + if _, err := cw.WriteString(string("State")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.State)); err != nil { + return err + } + + // t.Client (peer.ID) (string) + if len("Client") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Client\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Client"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Client")); err != nil { + return err + } + + if len(t.Client) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Client was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Client))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Client)); err != nil { + return err + } + + // t.DealID (abi.DealID) (uint64) + if len("DealID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := 
cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Message)); err != nil { + return err + } + + // t.PiecePath (filestore.Path) (string) + if len("PiecePath") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PiecePath\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PiecePath"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PiecePath")); err != nil { + return err + } + + if len(t.PiecePath) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.PiecePath was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.PiecePath))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.PiecePath)); err != nil { + return err + } + + // t.InboundCAR (string) (string) + if len("InboundCAR") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"InboundCAR\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("InboundCAR"))); err != nil { + return err + } + if _, err := cw.WriteString(string("InboundCAR")); err != nil { + return err + } + + if len(t.InboundCAR) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.InboundCAR was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.InboundCAR))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.InboundCAR)); err != nil { + return err + } + + // t.PublishCid (cid.Cid) (struct) + if len("PublishCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PublishCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PublishCid"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PublishCid")); err != nil { + return err + } + + if t.PublishCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PublishCid); err != nil { + return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err) + } + } + + // t.SlashEpoch (abi.ChainEpoch) (int64) + if len("SlashEpoch") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SlashEpoch\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SlashEpoch"))); err != nil { + return err + } + if _, err := cw.WriteString(string("SlashEpoch")); err != nil { + return err + } + + if t.SlashEpoch >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SlashEpoch)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.SlashEpoch-1)); err != nil { + return err + } + } + + // t.AddFundsCid (cid.Cid) (struct) + if len("AddFundsCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"AddFundsCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("AddFundsCid"))); err != nil { + return err + } + if _, err := cw.WriteString(string("AddFundsCid")); err != nil { + return err + } + + if t.AddFundsCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.AddFundsCid); err != nil { 
+ return xerrors.Errorf("failed to write cid field t.AddFundsCid: %w", err) + } + } + + // t.ProposalCid (cid.Cid) (struct) + if len("ProposalCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ProposalCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ProposalCid"))); err != nil { + return err + } + if _, err := cw.WriteString(string("ProposalCid")); err != nil { + return err + } + + if err := cbg.WriteCid(cw, t.ProposalCid); err != nil { + return xerrors.Errorf("failed to write cid field t.ProposalCid: %w", err) + } + + // t.CreationTime (typegen.CborTime) (struct) + if len("CreationTime") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CreationTime\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("CreationTime"))); err != nil { + return err + } + if _, err := cw.WriteString(string("CreationTime")); err != nil { + return err + } + + if err := t.CreationTime.MarshalCBOR(cw); err != nil { + return err + } + + // t.MetadataPath (filestore.Path) (string) + if len("MetadataPath") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MetadataPath\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("MetadataPath"))); err != nil { + return err + } + if _, err := cw.WriteString(string("MetadataPath")); err != nil { + return err + } + + if len(t.MetadataPath) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.MetadataPath was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.MetadataPath))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.MetadataPath)); err != nil { + return err + } + + // t.SectorNumber (abi.SectorNumber) (uint64) + if len("SectorNumber") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SectorNumber\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SectorNumber"))); err != nil { + return err + } + if _, err := cw.WriteString(string("SectorNumber")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SectorNumber)); err != nil { + return err + } + + // t.FastRetrieval (bool) (bool) + if len("FastRetrieval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FastRetrieval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FastRetrieval"))); err != nil { + return err + } + if _, err := cw.WriteString(string("FastRetrieval")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { + return err + } + + // t.FundsReserved (big.Int) (struct) + if len("FundsReserved") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FundsReserved\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FundsReserved"))); err != nil { + return err + } + if _, err := cw.WriteString(string("FundsReserved")); err != nil { + return err + } + + if err := t.FundsReserved.MarshalCBOR(cw); err != nil { + return err + } + + // t.TransferChannelId (datatransfer.ChannelID) (struct) + if len("TransferChannelId") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TransferChannelId\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("TransferChannelId"))); err != nil { + return err + } + if _, err := cw.WriteString(string("TransferChannelId")); err != nil { + return err + } + + if err := t.TransferChannelId.MarshalCBOR(cw); err != nil { + 
return err + } + + // t.ClientDealProposal (market.ClientDealProposal) (struct) + if len("ClientDealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ClientDealProposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ClientDealProposal"))); err != nil { + return err + } + if _, err := cw.WriteString(string("ClientDealProposal")); err != nil { + return err + } + + if err := t.ClientDealProposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.AvailableForRetrieval (bool) (bool) + if len("AvailableForRetrieval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"AvailableForRetrieval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("AvailableForRetrieval"))); err != nil { + return err + } + if _, err := cw.WriteString(string("AvailableForRetrieval")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.AvailableForRetrieval); err != nil { + return err + } + return nil +} + +func (t *MinerDeal1) UnmarshalCBOR(r io.Reader) (err error) { + *t = MinerDeal1{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("MinerDeal1: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Ref (legacytypes.DataRef) (struct) + case "Ref": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Ref = new(legacytypes.DataRef) + if err := t.Ref.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Ref pointer: %w", err) + } + } + + } + // t.Miner (peer.ID) (string) + case "Miner": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Miner = peer.ID(sval) + } + // t.State (uint64) (uint64) + case "State": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.State = uint64(extra) + + } + // t.Client (peer.ID) (string) + case "Client": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Client = peer.ID(sval) + } + // t.DealID (abi.DealID) (uint64) + case "DealID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.PiecePath (filestore.Path) (string) + case "PiecePath": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.PiecePath = filestore.Path(sval) + } + // t.InboundCAR (string) (string) + case "InboundCAR": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.InboundCAR = string(sval) + } + // t.PublishCid (cid.Cid) (struct) + case "PublishCid": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { 
+ return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) + } + + t.PublishCid = &c + } + + } + // t.SlashEpoch (abi.ChainEpoch) (int64) + case "SlashEpoch": + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative overflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SlashEpoch = abi.ChainEpoch(extraI) + } + // t.AddFundsCid (cid.Cid) (struct) + case "AddFundsCid": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.AddFundsCid: %w", err) + } + + t.AddFundsCid = &c + } + + } + // t.ProposalCid (cid.Cid) (struct) + case "ProposalCid": + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ProposalCid: %w", err) + } + + t.ProposalCid = c + + } + // t.CreationTime (typegen.CborTime) (struct) + case "CreationTime": + + { + + if err := t.CreationTime.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.CreationTime: %w", err) + } + + } + // t.MetadataPath (filestore.Path) (string) + case "MetadataPath": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.MetadataPath = filestore.Path(sval) + } + // t.SectorNumber (abi.SectorNumber) (uint64) + case "SectorNumber": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SectorNumber = abi.SectorNumber(extra) + + } + // t.FastRetrieval (bool) (bool) + case "FastRetrieval": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.FastRetrieval = false + case 21: + t.FastRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.FundsReserved (big.Int) (struct) + case "FundsReserved": + + { + + if err := t.FundsReserved.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.FundsReserved: %w", err) + } + + } + // t.TransferChannelId (datatransfer.ChannelID) (struct) + case "TransferChannelId": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.TransferChannelId = new(datatransfer.ChannelID) + if err := t.TransferChannelId.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.TransferChannelId pointer: %w", err) + } + } + + } + // t.ClientDealProposal (market.ClientDealProposal) (struct) + case "ClientDealProposal": + + { + + if err := t.ClientDealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ClientDealProposal: %w", err) + } + + } + // t.AvailableForRetrieval (bool) (bool) + case "AvailableForRetrieval": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 
7") + } + switch extra { + case 20: + t.AvailableForRetrieval = false + case 21: + t.AvailableForRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} diff --git a/storagemarket/types/legacytypes/network/types.go b/storagemarket/types/legacytypes/network/types.go new file mode 100644 index 000000000..604e0801f --- /dev/null +++ b/storagemarket/types/legacytypes/network/types.go @@ -0,0 +1,79 @@ +package network + +import ( + "github.com/filecoin-project/boost/storagemarket/types/legacytypes" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/ipfs/go-cid" +) + +//go:generate cbor-gen-for --map-encoding AskRequest AskResponse Proposal Response SignedResponse DealStatusRequest DealStatusResponse + +// Proposal is the data sent over the network from client to provider when proposing +// a deal +type Proposal struct { + DealProposal *market.ClientDealProposal + Piece *legacytypes.DataRef + FastRetrieval bool +} + +// ProposalUndefined is an empty Proposal message +var ProposalUndefined = Proposal{} + +// Response is a response to a proposal sent over the network +type Response struct { + State legacytypes.StorageDealStatus + + // DealProposalRejected + Message string + Proposal cid.Cid + + // StorageDealProposalAccepted + PublishMessage *cid.Cid +} + +// SignedResponse is a response that is signed +type SignedResponse struct { + Response Response + + Signature *crypto.Signature +} + +// SignedResponseUndefined represents an empty SignedResponse message +var SignedResponseUndefined = SignedResponse{} + +// AskRequest is a request for current ask parameters for a given miner +type AskRequest struct { + Miner address.Address +} + +// AskRequestUndefined represents and empty AskRequest message +var AskRequestUndefined = AskRequest{} + +// AskResponse is the response sent over the network in response +// to an ask request +type AskResponse struct { + Ask *legacytypes.SignedStorageAsk +} + +// AskResponseUndefined represents an empty AskResponse message +var AskResponseUndefined = AskResponse{} + +// DealStatusRequest sent by a client to query deal status +type DealStatusRequest struct { + Proposal cid.Cid + Signature crypto.Signature +} + +// DealStatusRequestUndefined represents an empty DealStatusRequest message +var DealStatusRequestUndefined = DealStatusRequest{} + +// DealStatusResponse is a provider's response to DealStatusRequest +type DealStatusResponse struct { + DealState legacytypes.ProviderDealState + Signature crypto.Signature +} + +// DealStatusResponseUndefined represents an empty DealStatusResponse message +var DealStatusResponseUndefined = DealStatusResponse{} diff --git a/storagemarket/types/legacytypes/network/types_cbor_gen.go b/storagemarket/types/legacytypes/network/types_cbor_gen.go new file mode 100644 index 000000000..e002fd641 --- /dev/null +++ b/storagemarket/types/legacytypes/network/types_cbor_gen.go @@ -0,0 +1,927 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package network + +import ( + "fmt" + "io" + "math" + "sort" + + legacytypes "github.com/filecoin-project/boost/storagemarket/types/legacytypes" + market "github.com/filecoin-project/go-state-types/builtin/v9/market" + crypto "github.com/filecoin-project/go-state-types/crypto" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +func (t *AskRequest) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{161}); err != nil { + return err + } + + // t.Miner (address.Address) (struct) + if len("Miner") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Miner\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Miner"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Miner")); err != nil { + return err + } + + if err := t.Miner.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *AskRequest) UnmarshalCBOR(r io.Reader) (err error) { + *t = AskRequest{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("AskRequest: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Miner (address.Address) (struct) + case "Miner": + + { + + if err := t.Miner.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Miner: %w", err) + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *AskResponse) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{161}); err != nil { + return err + } + + // t.Ask (legacytypes.SignedStorageAsk) (struct) + if len("Ask") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Ask\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Ask"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Ask")); err != nil { + return err + } + + if err := t.Ask.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *AskResponse) UnmarshalCBOR(r io.Reader) (err error) { + *t = AskResponse{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("AskResponse: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Ask (legacytypes.SignedStorageAsk) (struct) + case "Ask": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := 
cr.UnreadByte(); err != nil { + return err + } + t.Ask = new(legacytypes.SignedStorageAsk) + if err := t.Ask.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Ask pointer: %w", err) + } + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *Proposal) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{163}); err != nil { + return err + } + + // t.Piece (legacytypes.DataRef) (struct) + if len("Piece") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Piece\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Piece"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Piece")); err != nil { + return err + } + + if err := t.Piece.MarshalCBOR(cw); err != nil { + return err + } + + // t.DealProposal (market.ClientDealProposal) (struct) + if len("DealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealProposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealProposal"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealProposal")); err != nil { + return err + } + + if err := t.DealProposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.FastRetrieval (bool) (bool) + if len("FastRetrieval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FastRetrieval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FastRetrieval"))); err != nil { + return err + } + if _, err := cw.WriteString(string("FastRetrieval")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { + return err + } + return nil +} + +func (t *Proposal) UnmarshalCBOR(r io.Reader) (err error) { + *t = Proposal{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("Proposal: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Piece (legacytypes.DataRef) (struct) + case "Piece": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Piece = new(legacytypes.DataRef) + if err := t.Piece.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Piece pointer: %w", err) + } + } + + } + // t.DealProposal (market.ClientDealProposal) (struct) + case "DealProposal": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.DealProposal = new(market.ClientDealProposal) + if err := t.DealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealProposal pointer: %w", err) + } + } + + } + // t.FastRetrieval (bool) (bool) + case "FastRetrieval": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 
7") + } + switch extra { + case 20: + t.FastRetrieval = false + case 21: + t.FastRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *Response) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{164}); err != nil { + return err + } + + // t.State (uint64) (uint64) + if len("State") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"State\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("State"))); err != nil { + return err + } + if _, err := cw.WriteString(string("State")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.State)); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Message)); err != nil { + return err + } + + // t.Proposal (cid.Cid) (struct) + if len("Proposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Proposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Proposal"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Proposal")); err != nil { + return err + } + + if err := cbg.WriteCid(cw, t.Proposal); err != nil { + return xerrors.Errorf("failed to write cid field t.Proposal: %w", err) + } + + // t.PublishMessage (cid.Cid) (struct) + if len("PublishMessage") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PublishMessage\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PublishMessage"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PublishMessage")); err != nil { + return err + } + + if t.PublishMessage == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PublishMessage); err != nil { + return xerrors.Errorf("failed to write cid field t.PublishMessage: %w", err) + } + } + + return nil +} + +func (t *Response) UnmarshalCBOR(r io.Reader) (err error) { + *t = Response{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("Response: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.State (uint64) (uint64) + case "State": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + 
return fmt.Errorf("wrong type for uint64 field") + } + t.State = uint64(extra) + + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.Proposal (cid.Cid) (struct) + case "Proposal": + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Proposal: %w", err) + } + + t.Proposal = c + + } + // t.PublishMessage (cid.Cid) (struct) + case "PublishMessage": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishMessage: %w", err) + } + + t.PublishMessage = &c + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *SignedResponse) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.Response (network.Response) (struct) + if len("Response") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Response\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Response"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Response")); err != nil { + return err + } + + if err := t.Response.MarshalCBOR(cw); err != nil { + return err + } + + // t.Signature (crypto.Signature) (struct) + if len("Signature") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Signature\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Signature"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Signature")); err != nil { + return err + } + + if err := t.Signature.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *SignedResponse) UnmarshalCBOR(r io.Reader) (err error) { + *t = SignedResponse{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("SignedResponse: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Response (network.Response) (struct) + case "Response": + + { + + if err := t.Response.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Response: %w", err) + } + + } + // t.Signature (crypto.Signature) (struct) + case "Signature": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Signature = new(crypto.Signature) + if err := t.Signature.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Signature pointer: %w", err) + } + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DealStatusRequest) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := 
w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.Proposal (cid.Cid) (struct) + if len("Proposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Proposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Proposal"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Proposal")); err != nil { + return err + } + + if err := cbg.WriteCid(cw, t.Proposal); err != nil { + return xerrors.Errorf("failed to write cid field t.Proposal: %w", err) + } + + // t.Signature (crypto.Signature) (struct) + if len("Signature") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Signature\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Signature"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Signature")); err != nil { + return err + } + + if err := t.Signature.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *DealStatusRequest) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealStatusRequest{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealStatusRequest: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Proposal (cid.Cid) (struct) + case "Proposal": + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Proposal: %w", err) + } + + t.Proposal = c + + } + // t.Signature (crypto.Signature) (struct) + case "Signature": + + { + + if err := t.Signature.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Signature: %w", err) + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DealStatusResponse) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.DealState (legacytypes.ProviderDealState) (struct) + if len("DealState") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealState\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealState"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealState")); err != nil { + return err + } + + if err := t.DealState.MarshalCBOR(cw); err != nil { + return err + } + + // t.Signature (crypto.Signature) (struct) + if len("Signature") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Signature\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Signature"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Signature")); err != nil { + return err + } + + if err := t.Signature.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *DealStatusResponse) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealStatusResponse{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := 
cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealStatusResponse: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.DealState (legacytypes.ProviderDealState) (struct) + case "DealState": + + { + + if err := t.DealState.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealState: %w", err) + } + + } + // t.Signature (crypto.Signature) (struct) + case "Signature": + + { + + if err := t.Signature.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Signature: %w", err) + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} diff --git a/storagemarket/types/legacytypes/types.go b/storagemarket/types/legacytypes/types.go new file mode 100644 index 000000000..440a898c7 --- /dev/null +++ b/storagemarket/types/legacytypes/types.go @@ -0,0 +1,319 @@ +package legacytypes + +import ( + "fmt" + "time" + + "github.com/filecoin-project/boost/datatransfer" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/boost/storagemarket/types/legacytypes/filestore" +) + +var log = logging.Logger("storagemrkt") + +//go:generate cbor-gen-for --map-encoding ClientDeal MinerDeal Balance SignedStorageAsk StorageAsk DataRef ProviderDealState DealStages DealStage Log + +// The ID for the libp2p protocol for proposing storage deals. +const DealProtocolID101 = "/fil/storage/mk/1.0.1" +const DealProtocolID110 = "/fil/storage/mk/1.1.0" +const DealProtocolID111 = "/fil/storage/mk/1.1.1" + +// AskProtocolID is the ID for the libp2p protocol for querying miners for their current StorageAsk. +const OldAskProtocolID = "/fil/storage/ask/1.0.1" +const AskProtocolID = "/fil/storage/ask/1.1.0" + +// DealStatusProtocolID is the ID for the libp2p protocol for querying miners for the current status of a deal. +const OldDealStatusProtocolID = "/fil/storage/status/1.0.1" +const DealStatusProtocolID = "/fil/storage/status/1.1.0" + +// Balance represents a current balance of funds in the StorageMarketActor. +type Balance struct { + Locked abi.TokenAmount + Available abi.TokenAmount +} + +// StorageAsk defines the parameters by which a miner will choose to accept or +// reject a deal. Note: making a storage deal proposal which matches the miner's +// ask is a precondition, but not sufficient to ensure the deal is accepted (the +// storage provider may run its own decision logic). 
+type StorageAsk struct { + // Price per GiB / Epoch + Price abi.TokenAmount + VerifiedPrice abi.TokenAmount + + MinPieceSize abi.PaddedPieceSize + MaxPieceSize abi.PaddedPieceSize + Miner address.Address + Timestamp abi.ChainEpoch + Expiry abi.ChainEpoch + SeqNo uint64 +} + +// SignedStorageAsk is an ask signed by the miner's private key +type SignedStorageAsk struct { + Ask *StorageAsk + Signature *crypto.Signature +} + +// SignedStorageAskUndefined represents the empty value for SignedStorageAsk +var SignedStorageAskUndefined = SignedStorageAsk{} + +// StorageAskOption allows custom configuration of a storage ask +type StorageAskOption func(*StorageAsk) + +// MinPieceSize configures a minimum piece size of a StorageAsk +func MinPieceSize(minPieceSize abi.PaddedPieceSize) StorageAskOption { + return func(sa *StorageAsk) { + sa.MinPieceSize = minPieceSize + } +} + +// MaxPieceSize configures a maximum piece size of a StorageAsk +func MaxPieceSize(maxPieceSize abi.PaddedPieceSize) StorageAskOption { + return func(sa *StorageAsk) { + sa.MaxPieceSize = maxPieceSize + } +} + +// StorageAskUndefined represents an empty value for StorageAsk +var StorageAskUndefined = StorageAsk{} + +type ClientDealProposal = market.ClientDealProposal + +// MinerDeal is the local state tracked for a deal by a StorageProvider +type MinerDeal struct { + ClientDealProposal + ProposalCid cid.Cid + AddFundsCid *cid.Cid + PublishCid *cid.Cid + Miner peer.ID + Client peer.ID + State StorageDealStatus + PiecePath filestore.Path + MetadataPath filestore.Path + SlashEpoch abi.ChainEpoch + FastRetrieval bool + Message string + FundsReserved abi.TokenAmount + Ref *DataRef + AvailableForRetrieval bool + + DealID abi.DealID + CreationTime cbg.CborTime + + TransferChannelId *datatransfer.ChannelID + SectorNumber abi.SectorNumber + + InboundCAR string +} + +// NewDealStages creates a new DealStages object ready to be used. +// EXPERIMENTAL; subject to change. +func NewDealStages() *DealStages { + return &DealStages{} +} + +// DealStages captures a timeline of the progress of a deal, grouped by stages. +// EXPERIMENTAL; subject to change. +type DealStages struct { + // Stages contains an entry for every stage that the deal has gone through. + // Each stage then contains logs. + Stages []*DealStage +} + +// DealStage captures data about the execution of a deal stage. +// EXPERIMENTAL; subject to change. +type DealStage struct { + // Human-readable fields. + // TODO: these _will_ need to be converted to canonical representations, so + // they are machine readable. + Name string + Description string + ExpectedDuration string + + // Timestamps. + // TODO: may be worth adding an exit timestamp. It _could_ be inferred from + // the start of the next stage, or from the timestamp of the last log line + // if this is a terminal stage. But that's non-deterministic and it relies on + // assumptions. + CreatedTime cbg.CborTime + UpdatedTime cbg.CborTime + + // Logs contains a detailed timeline of events that occurred inside + // this stage. + Logs []*Log +} + +// Log represents a point-in-time event that occurred inside a deal stage. +// EXPERIMENTAL; subject to change. +type Log struct { + // Log is a human-readable message. + // + // TODO: this _may_ need to be converted to a canonical data model so it + // is machine-readable. + Log string + + UpdatedTime cbg.CborTime +} + +// GetStage returns the DealStage object for a named stage, or nil if not found. +// +// TODO: the input should be a strongly-typed enum instead of a free-form string. 
+// TODO: drop Get from GetStage to make this code more idiomatic. Return a +// second ok boolean to make it even more idiomatic. +// EXPERIMENTAL; subject to change. +func (ds *DealStages) GetStage(stage string) *DealStage { + if ds == nil { + return nil + } + + for _, s := range ds.Stages { + if s.Name == stage { + return s + } + } + + return nil +} + +// AddStageLog adds a log to the specified stage, creating the stage if it +// doesn't exist yet. +// EXPERIMENTAL; subject to change. +func (ds *DealStages) AddStageLog(stage, description, expectedDuration, msg string) { + if ds == nil { + return + } + + log.Debugf("adding log for stage <%s> msg <%s>", stage, msg) + + now := curTime() + st := ds.GetStage(stage) + if st == nil { + st = &DealStage{ + CreatedTime: now, + } + ds.Stages = append(ds.Stages, st) + } + + st.Name = stage + st.Description = description + st.ExpectedDuration = expectedDuration + st.UpdatedTime = now + if msg != "" && (len(st.Logs) == 0 || st.Logs[len(st.Logs)-1].Log != msg) { + // only add the log if it's not a duplicate. + st.Logs = append(st.Logs, &Log{msg, now}) + } +} + +// AddLog adds a log inside the DealStages object of the deal. +// EXPERIMENTAL; subject to change. +func (d *ClientDeal) AddLog(msg string, a ...interface{}) { + if len(a) > 0 { + msg = fmt.Sprintf(msg, a...) + } + + stage := DealStates[d.State] + description := DealStatesDescriptions[d.State] + expectedDuration := DealStatesDurations[d.State] + + d.DealStages.AddStageLog(stage, description, expectedDuration, msg) +} + +// ClientDeal is the local state tracked for a deal by a StorageClient +type ClientDeal struct { + market.ClientDealProposal + ProposalCid cid.Cid + AddFundsCid *cid.Cid + State StorageDealStatus + Miner peer.ID + MinerWorker address.Address + DealID abi.DealID + DataRef *DataRef + Message string + DealStages *DealStages + PublishMessage *cid.Cid + SlashEpoch abi.ChainEpoch + PollRetryCount uint64 + PollErrorCount uint64 + FastRetrieval bool + FundsReserved abi.TokenAmount + CreationTime cbg.CborTime + TransferChannelID *datatransfer.ChannelID + SectorNumber abi.SectorNumber +} + +// StorageProviderInfo describes on-chain information about a StorageProvider +// (use QueryAsk to determine more specific deal parameters) +type StorageProviderInfo struct { + Address address.Address // actor address + Owner address.Address + Worker address.Address // signs messages + SectorSize uint64 + PeerID peer.ID + Addrs []ma.Multiaddr +} + +// ProposeStorageDealResult returns the result for proposing a deal +type ProposeStorageDealResult struct { + ProposalCid cid.Cid +} + +// ProposeStorageDealParams describes the parameters for proposing a storage deal +type ProposeStorageDealParams struct { + Addr address.Address + Info *StorageProviderInfo + Data *DataRef + StartEpoch abi.ChainEpoch + EndEpoch abi.ChainEpoch + Price abi.TokenAmount + Collateral abi.TokenAmount + Rt abi.RegisteredSealProof + FastRetrieval bool + VerifiedDeal bool +} + +const ( + // TTGraphsync means data for a deal will be transferred by graphsync + TTGraphsync = "graphsync" + + // TTManual means data for a deal will be transferred manually and imported + // on the provider + TTManual = "manual" +) + +// DataRef is a reference for how data will be transferred for a given storage deal +type DataRef struct { + TransferType string + Root cid.Cid + + PieceCid *cid.Cid // Optional for non-manual transfer, will be recomputed from the data if not given + PieceSize abi.UnpaddedPieceSize // Optional for non-manual transfer, will be 
recomputed from the data if not given + RawBlockSize uint64 // Optional: used as the denominator when calculating transfer % +} + +// ProviderDealState represents a Provider's current state of a deal +type ProviderDealState struct { + State StorageDealStatus + Message string + Proposal *market.DealProposal + ProposalCid *cid.Cid + AddFundsCid *cid.Cid + PublishCid *cid.Cid + DealID abi.DealID + FastRetrieval bool +} + +func curTime() cbg.CborTime { + now := time.Now() + return cbg.CborTime(time.Unix(0, now.UnixNano()).UTC()) +} diff --git a/storagemarket/types/legacytypes/types_cbor_gen.go b/storagemarket/types/legacytypes/types_cbor_gen.go new file mode 100644 index 000000000..0b2b3d00c --- /dev/null +++ b/storagemarket/types/legacytypes/types_cbor_gen.go @@ -0,0 +1,3139 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + +package legacytypes + +import ( + "fmt" + "io" + "math" + "sort" + + "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/storagemarket/types/legacytypes/filestore" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +func (t *ClientDeal) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{179}); err != nil { + return err + } + + // t.Miner (peer.ID) (string) + if len("Miner") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Miner\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Miner"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Miner")); err != nil { + return err + } + + if len(t.Miner) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Miner was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Miner))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Miner)); err != nil { + return err + } + + // t.State (uint64) (uint64) + if len("State") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"State\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("State"))); err != nil { + return err + } + if _, err := cw.WriteString(string("State")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.State)); err != nil { + return err + } + + // t.DealID (abi.DealID) (uint64) + if len("DealID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.DataRef (legacytypes.DataRef) (struct) + if len("DataRef") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DataRef\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DataRef"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DataRef")); err != nil { + return err + } + + if err := t.DataRef.MarshalCBOR(cw); 
err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Message)); err != nil { + return err + } + + // t.DealStages (legacytypes.DealStages) (struct) + if len("DealStages") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealStages\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealStages"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealStages")); err != nil { + return err + } + + if err := t.DealStages.MarshalCBOR(cw); err != nil { + return err + } + + // t.SlashEpoch (abi.ChainEpoch) (int64) + if len("SlashEpoch") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SlashEpoch\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SlashEpoch"))); err != nil { + return err + } + if _, err := cw.WriteString(string("SlashEpoch")); err != nil { + return err + } + + if t.SlashEpoch >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SlashEpoch)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.SlashEpoch-1)); err != nil { + return err + } + } + + // t.AddFundsCid (cid.Cid) (struct) + if len("AddFundsCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"AddFundsCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("AddFundsCid"))); err != nil { + return err + } + if _, err := cw.WriteString(string("AddFundsCid")); err != nil { + return err + } + + if t.AddFundsCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.AddFundsCid); err != nil { + return xerrors.Errorf("failed to write cid field t.AddFundsCid: %w", err) + } + } + + // t.MinerWorker (address.Address) (struct) + if len("MinerWorker") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MinerWorker\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("MinerWorker"))); err != nil { + return err + } + if _, err := cw.WriteString(string("MinerWorker")); err != nil { + return err + } + + if err := t.MinerWorker.MarshalCBOR(cw); err != nil { + return err + } + + // t.ProposalCid (cid.Cid) (struct) + if len("ProposalCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ProposalCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ProposalCid"))); err != nil { + return err + } + if _, err := cw.WriteString(string("ProposalCid")); err != nil { + return err + } + + if err := cbg.WriteCid(cw, t.ProposalCid); err != nil { + return xerrors.Errorf("failed to write cid field t.ProposalCid: %w", err) + } + + // t.CreationTime (typegen.CborTime) (struct) + if len("CreationTime") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CreationTime\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("CreationTime"))); err != nil { + return 
err + } + if _, err := cw.WriteString(string("CreationTime")); err != nil { + return err + } + + if err := t.CreationTime.MarshalCBOR(cw); err != nil { + return err + } + + // t.SectorNumber (abi.SectorNumber) (uint64) + if len("SectorNumber") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SectorNumber\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SectorNumber"))); err != nil { + return err + } + if _, err := cw.WriteString(string("SectorNumber")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SectorNumber)); err != nil { + return err + } + + // t.FastRetrieval (bool) (bool) + if len("FastRetrieval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FastRetrieval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FastRetrieval"))); err != nil { + return err + } + if _, err := cw.WriteString(string("FastRetrieval")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { + return err + } + + // t.FundsReserved (big.Int) (struct) + if len("FundsReserved") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FundsReserved\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FundsReserved"))); err != nil { + return err + } + if _, err := cw.WriteString(string("FundsReserved")); err != nil { + return err + } + + if err := t.FundsReserved.MarshalCBOR(cw); err != nil { + return err + } + + // t.PollErrorCount (uint64) (uint64) + if len("PollErrorCount") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PollErrorCount\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PollErrorCount"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PollErrorCount")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PollErrorCount)); err != nil { + return err + } + + // t.PollRetryCount (uint64) (uint64) + if len("PollRetryCount") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PollRetryCount\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PollRetryCount"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PollRetryCount")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PollRetryCount)); err != nil { + return err + } + + // t.PublishMessage (cid.Cid) (struct) + if len("PublishMessage") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PublishMessage\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PublishMessage"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PublishMessage")); err != nil { + return err + } + + if t.PublishMessage == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PublishMessage); err != nil { + return xerrors.Errorf("failed to write cid field t.PublishMessage: %w", err) + } + } + + // t.TransferChannelID (datatransfer.ChannelID) (struct) + if len("TransferChannelID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TransferChannelID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("TransferChannelID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("TransferChannelID")); err != nil { + return err + } + + if 
err := t.TransferChannelID.MarshalCBOR(cw); err != nil { + return err + } + + // t.ClientDealProposal (market.ClientDealProposal) (struct) + if len("ClientDealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ClientDealProposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ClientDealProposal"))); err != nil { + return err + } + if _, err := cw.WriteString(string("ClientDealProposal")); err != nil { + return err + } + + if err := t.ClientDealProposal.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *ClientDeal) UnmarshalCBOR(r io.Reader) (err error) { + *t = ClientDeal{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("ClientDeal: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Miner (peer.ID) (string) + case "Miner": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Miner = peer.ID(sval) + } + // t.State (uint64) (uint64) + case "State": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.State = uint64(extra) + + } + // t.DealID (abi.DealID) (uint64) + case "DealID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.DataRef (legacytypes.DataRef) (struct) + case "DataRef": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.DataRef = new(DataRef) + if err := t.DataRef.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DataRef pointer: %w", err) + } + } + + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.DealStages (legacytypes.DealStages) (struct) + case "DealStages": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.DealStages = new(DealStages) + if err := t.DealStages.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealStages pointer: %w", err) + } + } + + } + // t.SlashEpoch (abi.ChainEpoch) (int64) + case "SlashEpoch": + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative overflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SlashEpoch = abi.ChainEpoch(extraI) + } + // t.AddFundsCid (cid.Cid) (struct) + case "AddFundsCid": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err 
:= cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.AddFundsCid: %w", err) + } + + t.AddFundsCid = &c + } + + } + // t.MinerWorker (address.Address) (struct) + case "MinerWorker": + + { + + if err := t.MinerWorker.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.MinerWorker: %w", err) + } + + } + // t.ProposalCid (cid.Cid) (struct) + case "ProposalCid": + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ProposalCid: %w", err) + } + + t.ProposalCid = c + + } + // t.CreationTime (typegen.CborTime) (struct) + case "CreationTime": + + { + + if err := t.CreationTime.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.CreationTime: %w", err) + } + + } + // t.SectorNumber (abi.SectorNumber) (uint64) + case "SectorNumber": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SectorNumber = abi.SectorNumber(extra) + + } + // t.FastRetrieval (bool) (bool) + case "FastRetrieval": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.FastRetrieval = false + case 21: + t.FastRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.FundsReserved (big.Int) (struct) + case "FundsReserved": + + { + + if err := t.FundsReserved.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.FundsReserved: %w", err) + } + + } + // t.PollErrorCount (uint64) (uint64) + case "PollErrorCount": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PollErrorCount = uint64(extra) + + } + // t.PollRetryCount (uint64) (uint64) + case "PollRetryCount": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PollRetryCount = uint64(extra) + + } + // t.PublishMessage (cid.Cid) (struct) + case "PublishMessage": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishMessage: %w", err) + } + + t.PublishMessage = &c + } + + } + // t.TransferChannelID (datatransfer.ChannelID) (struct) + case "TransferChannelID": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.TransferChannelID = new(datatransfer.ChannelID) + if err := t.TransferChannelID.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.TransferChannelID pointer: %w", err) + } + } + + } + // t.ClientDealProposal (market.ClientDealProposal) (struct) + case "ClientDealProposal": + + { + + if err := t.ClientDealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ClientDealProposal: %w", err) + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *MinerDeal) MarshalCBOR(w 
io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{180}); err != nil { + return err + } + + // t.Ref (legacytypes.DataRef) (struct) + if len("Ref") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Ref\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Ref"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Ref")); err != nil { + return err + } + + if err := t.Ref.MarshalCBOR(cw); err != nil { + return err + } + + // t.Miner (peer.ID) (string) + if len("Miner") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Miner\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Miner"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Miner")); err != nil { + return err + } + + if len(t.Miner) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Miner was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Miner))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Miner)); err != nil { + return err + } + + // t.State (uint64) (uint64) + if len("State") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"State\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("State"))); err != nil { + return err + } + if _, err := cw.WriteString(string("State")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.State)); err != nil { + return err + } + + // t.Client (peer.ID) (string) + if len("Client") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Client\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Client"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Client")); err != nil { + return err + } + + if len(t.Client) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Client was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Client))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Client)); err != nil { + return err + } + + // t.DealID (abi.DealID) (uint64) + if len("DealID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Message)); err != nil { + return err + } + + // t.PiecePath (filestore.Path) (string) + if len("PiecePath") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PiecePath\" was too long") + } + + if err := 
cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PiecePath"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PiecePath")); err != nil { + return err + } + + if len(t.PiecePath) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.PiecePath was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.PiecePath))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.PiecePath)); err != nil { + return err + } + + // t.InboundCAR (string) (string) + if len("InboundCAR") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"InboundCAR\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("InboundCAR"))); err != nil { + return err + } + if _, err := cw.WriteString(string("InboundCAR")); err != nil { + return err + } + + if len(t.InboundCAR) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.InboundCAR was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.InboundCAR))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.InboundCAR)); err != nil { + return err + } + + // t.PublishCid (cid.Cid) (struct) + if len("PublishCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PublishCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PublishCid"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PublishCid")); err != nil { + return err + } + + if t.PublishCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PublishCid); err != nil { + return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err) + } + } + + // t.SlashEpoch (abi.ChainEpoch) (int64) + if len("SlashEpoch") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SlashEpoch\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SlashEpoch"))); err != nil { + return err + } + if _, err := cw.WriteString(string("SlashEpoch")); err != nil { + return err + } + + if t.SlashEpoch >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SlashEpoch)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.SlashEpoch-1)); err != nil { + return err + } + } + + // t.AddFundsCid (cid.Cid) (struct) + if len("AddFundsCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"AddFundsCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("AddFundsCid"))); err != nil { + return err + } + if _, err := cw.WriteString(string("AddFundsCid")); err != nil { + return err + } + + if t.AddFundsCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.AddFundsCid); err != nil { + return xerrors.Errorf("failed to write cid field t.AddFundsCid: %w", err) + } + } + + // t.ProposalCid (cid.Cid) (struct) + if len("ProposalCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ProposalCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ProposalCid"))); err != nil { + return err + } + if _, err := cw.WriteString(string("ProposalCid")); err != nil { + return err + } + + if err := cbg.WriteCid(cw, t.ProposalCid); err != nil { + return xerrors.Errorf("failed to write cid field t.ProposalCid: %w", err) + } + + // t.CreationTime (typegen.CborTime) (struct) + if 
len("CreationTime") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CreationTime\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("CreationTime"))); err != nil { + return err + } + if _, err := cw.WriteString(string("CreationTime")); err != nil { + return err + } + + if err := t.CreationTime.MarshalCBOR(cw); err != nil { + return err + } + + // t.MetadataPath (filestore.Path) (string) + if len("MetadataPath") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MetadataPath\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("MetadataPath"))); err != nil { + return err + } + if _, err := cw.WriteString(string("MetadataPath")); err != nil { + return err + } + + if len(t.MetadataPath) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.MetadataPath was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.MetadataPath))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.MetadataPath)); err != nil { + return err + } + + // t.SectorNumber (abi.SectorNumber) (uint64) + if len("SectorNumber") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SectorNumber\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SectorNumber"))); err != nil { + return err + } + if _, err := cw.WriteString(string("SectorNumber")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SectorNumber)); err != nil { + return err + } + + // t.FastRetrieval (bool) (bool) + if len("FastRetrieval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FastRetrieval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FastRetrieval"))); err != nil { + return err + } + if _, err := cw.WriteString(string("FastRetrieval")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { + return err + } + + // t.FundsReserved (big.Int) (struct) + if len("FundsReserved") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FundsReserved\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FundsReserved"))); err != nil { + return err + } + if _, err := cw.WriteString(string("FundsReserved")); err != nil { + return err + } + + if err := t.FundsReserved.MarshalCBOR(cw); err != nil { + return err + } + + // t.TransferChannelId (datatransfer.ChannelID) (struct) + if len("TransferChannelId") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TransferChannelId\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("TransferChannelId"))); err != nil { + return err + } + if _, err := cw.WriteString(string("TransferChannelId")); err != nil { + return err + } + + if err := t.TransferChannelId.MarshalCBOR(cw); err != nil { + return err + } + + // t.ClientDealProposal (market.ClientDealProposal) (struct) + if len("ClientDealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ClientDealProposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ClientDealProposal"))); err != nil { + return err + } + if _, err := cw.WriteString(string("ClientDealProposal")); err != nil { + return err + } + + if err := t.ClientDealProposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.AvailableForRetrieval (bool) (bool) + if len("AvailableForRetrieval") > cbg.MaxLength { + return xerrors.Errorf("Value 
in field \"AvailableForRetrieval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("AvailableForRetrieval"))); err != nil { + return err + } + if _, err := cw.WriteString(string("AvailableForRetrieval")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.AvailableForRetrieval); err != nil { + return err + } + return nil +} + +func (t *MinerDeal) UnmarshalCBOR(r io.Reader) (err error) { + *t = MinerDeal{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("MinerDeal: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Ref (legacytypes.DataRef) (struct) + case "Ref": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Ref = new(DataRef) + if err := t.Ref.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Ref pointer: %w", err) + } + } + + } + // t.Miner (peer.ID) (string) + case "Miner": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Miner = peer.ID(sval) + } + // t.State (uint64) (uint64) + case "State": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.State = uint64(extra) + + } + // t.Client (peer.ID) (string) + case "Client": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Client = peer.ID(sval) + } + // t.DealID (abi.DealID) (uint64) + case "DealID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.PiecePath (filestore.Path) (string) + case "PiecePath": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.PiecePath = filestore.Path(sval) + } + // t.InboundCAR (string) (string) + case "InboundCAR": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.InboundCAR = string(sval) + } + // t.PublishCid (cid.Cid) (struct) + case "PublishCid": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) + } + + t.PublishCid = &c + } + + } + // t.SlashEpoch (abi.ChainEpoch) (int64) + case "SlashEpoch": + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative overflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong 
type for int64 field: %d", maj) + } + + t.SlashEpoch = abi.ChainEpoch(extraI) + } + // t.AddFundsCid (cid.Cid) (struct) + case "AddFundsCid": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.AddFundsCid: %w", err) + } + + t.AddFundsCid = &c + } + + } + // t.ProposalCid (cid.Cid) (struct) + case "ProposalCid": + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ProposalCid: %w", err) + } + + t.ProposalCid = c + + } + // t.CreationTime (typegen.CborTime) (struct) + case "CreationTime": + + { + + if err := t.CreationTime.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.CreationTime: %w", err) + } + + } + // t.MetadataPath (filestore.Path) (string) + case "MetadataPath": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.MetadataPath = filestore.Path(sval) + } + // t.SectorNumber (abi.SectorNumber) (uint64) + case "SectorNumber": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SectorNumber = abi.SectorNumber(extra) + + } + // t.FastRetrieval (bool) (bool) + case "FastRetrieval": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.FastRetrieval = false + case 21: + t.FastRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.FundsReserved (big.Int) (struct) + case "FundsReserved": + + { + + if err := t.FundsReserved.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.FundsReserved: %w", err) + } + + } + // t.TransferChannelId (datatransfer.ChannelID) (struct) + case "TransferChannelId": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.TransferChannelId = new(datatransfer.ChannelID) + if err := t.TransferChannelId.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.TransferChannelId pointer: %w", err) + } + } + + } + // t.ClientDealProposal (market.ClientDealProposal) (struct) + case "ClientDealProposal": + + { + + if err := t.ClientDealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ClientDealProposal: %w", err) + } + + } + // t.AvailableForRetrieval (bool) (bool) + case "AvailableForRetrieval": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.AvailableForRetrieval = false + case 21: + t.AvailableForRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *Balance) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.Locked (big.Int) (struct) + if len("Locked") > cbg.MaxLength { + return 
xerrors.Errorf("Value in field \"Locked\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Locked"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Locked")); err != nil { + return err + } + + if err := t.Locked.MarshalCBOR(cw); err != nil { + return err + } + + // t.Available (big.Int) (struct) + if len("Available") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Available\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Available"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Available")); err != nil { + return err + } + + if err := t.Available.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *Balance) UnmarshalCBOR(r io.Reader) (err error) { + *t = Balance{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("Balance: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Locked (big.Int) (struct) + case "Locked": + + { + + if err := t.Locked.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Locked: %w", err) + } + + } + // t.Available (big.Int) (struct) + case "Available": + + { + + if err := t.Available.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Available: %w", err) + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *SignedStorageAsk) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.Ask (legacytypes.StorageAsk) (struct) + if len("Ask") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Ask\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Ask"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Ask")); err != nil { + return err + } + + if err := t.Ask.MarshalCBOR(cw); err != nil { + return err + } + + // t.Signature (crypto.Signature) (struct) + if len("Signature") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Signature\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Signature"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Signature")); err != nil { + return err + } + + if err := t.Signature.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *SignedStorageAsk) UnmarshalCBOR(r io.Reader) (err error) { + *t = SignedStorageAsk{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("SignedStorageAsk: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + 
if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Ask (legacytypes.StorageAsk) (struct) + case "Ask": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Ask = new(StorageAsk) + if err := t.Ask.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Ask pointer: %w", err) + } + } + + } + // t.Signature (crypto.Signature) (struct) + case "Signature": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Signature = new(crypto.Signature) + if err := t.Signature.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Signature pointer: %w", err) + } + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *StorageAsk) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{168}); err != nil { + return err + } + + // t.Miner (address.Address) (struct) + if len("Miner") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Miner\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Miner"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Miner")); err != nil { + return err + } + + if err := t.Miner.MarshalCBOR(cw); err != nil { + return err + } + + // t.Price (big.Int) (struct) + if len("Price") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Price\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Price"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Price")); err != nil { + return err + } + + if err := t.Price.MarshalCBOR(cw); err != nil { + return err + } + + // t.SeqNo (uint64) (uint64) + if len("SeqNo") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SeqNo\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SeqNo"))); err != nil { + return err + } + if _, err := cw.WriteString(string("SeqNo")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SeqNo)); err != nil { + return err + } + + // t.Expiry (abi.ChainEpoch) (int64) + if len("Expiry") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Expiry\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Expiry"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Expiry")); err != nil { + return err + } + + if t.Expiry >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Expiry)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Expiry-1)); err != nil { + return err + } + } + + // t.Timestamp (abi.ChainEpoch) (int64) + if len("Timestamp") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Timestamp\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Timestamp"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Timestamp")); err != nil { + return err + } + + if t.Timestamp >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Timestamp)); err != nil { + return err + } + } else { + if err := 
cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Timestamp-1)); err != nil { + return err + } + } + + // t.MaxPieceSize (abi.PaddedPieceSize) (uint64) + if len("MaxPieceSize") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MaxPieceSize\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("MaxPieceSize"))); err != nil { + return err + } + if _, err := cw.WriteString(string("MaxPieceSize")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.MaxPieceSize)); err != nil { + return err + } + + // t.MinPieceSize (abi.PaddedPieceSize) (uint64) + if len("MinPieceSize") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MinPieceSize\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("MinPieceSize"))); err != nil { + return err + } + if _, err := cw.WriteString(string("MinPieceSize")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.MinPieceSize)); err != nil { + return err + } + + // t.VerifiedPrice (big.Int) (struct) + if len("VerifiedPrice") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"VerifiedPrice\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("VerifiedPrice"))); err != nil { + return err + } + if _, err := cw.WriteString(string("VerifiedPrice")); err != nil { + return err + } + + if err := t.VerifiedPrice.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *StorageAsk) UnmarshalCBOR(r io.Reader) (err error) { + *t = StorageAsk{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("StorageAsk: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Miner (address.Address) (struct) + case "Miner": + + { + + if err := t.Miner.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Miner: %w", err) + } + + } + // t.Price (big.Int) (struct) + case "Price": + + { + + if err := t.Price.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Price: %w", err) + } + + } + // t.SeqNo (uint64) (uint64) + case "SeqNo": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SeqNo = uint64(extra) + + } + // t.Expiry (abi.ChainEpoch) (int64) + case "Expiry": + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative overflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Expiry = abi.ChainEpoch(extraI) + } + // t.Timestamp (abi.ChainEpoch) (int64) + case "Timestamp": + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = 
int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative overflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Timestamp = abi.ChainEpoch(extraI) + } + // t.MaxPieceSize (abi.PaddedPieceSize) (uint64) + case "MaxPieceSize": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.MaxPieceSize = abi.PaddedPieceSize(extra) + + } + // t.MinPieceSize (abi.PaddedPieceSize) (uint64) + case "MinPieceSize": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.MinPieceSize = abi.PaddedPieceSize(extra) + + } + // t.VerifiedPrice (big.Int) (struct) + case "VerifiedPrice": + + { + + if err := t.VerifiedPrice.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.VerifiedPrice: %w", err) + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DataRef) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{165}); err != nil { + return err + } + + // t.Root (cid.Cid) (struct) + if len("Root") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Root\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Root"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Root")); err != nil { + return err + } + + if err := cbg.WriteCid(cw, t.Root); err != nil { + return xerrors.Errorf("failed to write cid field t.Root: %w", err) + } + + // t.PieceCid (cid.Cid) (struct) + if len("PieceCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PieceCid"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PieceCid")); err != nil { + return err + } + + if t.PieceCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PieceCid); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCid: %w", err) + } + } + + // t.PieceSize (abi.UnpaddedPieceSize) (uint64) + if len("PieceSize") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceSize\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PieceSize"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PieceSize")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PieceSize)); err != nil { + return err + } + + // t.RawBlockSize (uint64) (uint64) + if len("RawBlockSize") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"RawBlockSize\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("RawBlockSize"))); err != nil { + return err + } + if _, err := cw.WriteString(string("RawBlockSize")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.RawBlockSize)); err != nil { + return err + } + + // t.TransferType (string) (string) + if len("TransferType") > cbg.MaxLength { + 
return xerrors.Errorf("Value in field \"TransferType\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("TransferType"))); err != nil { + return err + } + if _, err := cw.WriteString(string("TransferType")); err != nil { + return err + } + + if len(t.TransferType) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.TransferType was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.TransferType))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.TransferType)); err != nil { + return err + } + return nil +} + +func (t *DataRef) UnmarshalCBOR(r io.Reader) (err error) { + *t = DataRef{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DataRef: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Root (cid.Cid) (struct) + case "Root": + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Root: %w", err) + } + + t.Root = c + + } + // t.PieceCid (cid.Cid) (struct) + case "PieceCid": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCid: %w", err) + } + + t.PieceCid = &c + } + + } + // t.PieceSize (abi.UnpaddedPieceSize) (uint64) + case "PieceSize": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PieceSize = abi.UnpaddedPieceSize(extra) + + } + // t.RawBlockSize (uint64) (uint64) + case "RawBlockSize": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.RawBlockSize = uint64(extra) + + } + // t.TransferType (string) (string) + case "TransferType": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.TransferType = string(sval) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *ProviderDealState) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{168}); err != nil { + return err + } + + // t.State (uint64) (uint64) + if len("State") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"State\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("State"))); err != nil { + return err + } + if _, err := cw.WriteString(string("State")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.State)); err != nil { + return err + } + + // t.DealID (abi.DealID) (uint64) + if len("DealID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, 
uint64(len("DealID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Message)); err != nil { + return err + } + + // t.Proposal (market.DealProposal) (struct) + if len("Proposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Proposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Proposal"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Proposal")); err != nil { + return err + } + + if err := t.Proposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.PublishCid (cid.Cid) (struct) + if len("PublishCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PublishCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PublishCid"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PublishCid")); err != nil { + return err + } + + if t.PublishCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PublishCid); err != nil { + return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err) + } + } + + // t.AddFundsCid (cid.Cid) (struct) + if len("AddFundsCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"AddFundsCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("AddFundsCid"))); err != nil { + return err + } + if _, err := cw.WriteString(string("AddFundsCid")); err != nil { + return err + } + + if t.AddFundsCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.AddFundsCid); err != nil { + return xerrors.Errorf("failed to write cid field t.AddFundsCid: %w", err) + } + } + + // t.ProposalCid (cid.Cid) (struct) + if len("ProposalCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ProposalCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ProposalCid"))); err != nil { + return err + } + if _, err := cw.WriteString(string("ProposalCid")); err != nil { + return err + } + + if t.ProposalCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.ProposalCid); err != nil { + return xerrors.Errorf("failed to write cid field t.ProposalCid: %w", err) + } + } + + // t.FastRetrieval (bool) (bool) + if len("FastRetrieval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FastRetrieval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FastRetrieval"))); err != nil { + return err + } + if _, err := cw.WriteString(string("FastRetrieval")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { + 
return err + } + return nil +} + +func (t *ProviderDealState) UnmarshalCBOR(r io.Reader) (err error) { + *t = ProviderDealState{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("ProviderDealState: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.State (uint64) (uint64) + case "State": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.State = uint64(extra) + + } + // t.DealID (abi.DealID) (uint64) + case "DealID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.Proposal (market.DealProposal) (struct) + case "Proposal": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Proposal = new(market.DealProposal) + if err := t.Proposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Proposal pointer: %w", err) + } + } + + } + // t.PublishCid (cid.Cid) (struct) + case "PublishCid": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) + } + + t.PublishCid = &c + } + + } + // t.AddFundsCid (cid.Cid) (struct) + case "AddFundsCid": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.AddFundsCid: %w", err) + } + + t.AddFundsCid = &c + } + + } + // t.ProposalCid (cid.Cid) (struct) + case "ProposalCid": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ProposalCid: %w", err) + } + + t.ProposalCid = &c + } + + } + // t.FastRetrieval (bool) (bool) + case "FastRetrieval": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.FastRetrieval = false + case 21: + t.FastRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DealStages) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw 
:= cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{161}); err != nil { + return err + } + + // t.Stages ([]*legacytypes.DealStage) (slice) + if len("Stages") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Stages\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Stages"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Stages")); err != nil { + return err + } + + if len(t.Stages) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Stages was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Stages))); err != nil { + return err + } + for _, v := range t.Stages { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + return nil +} + +func (t *DealStages) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealStages{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealStages: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Stages ([]*legacytypes.DealStage) (slice) + case "Stages": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Stages: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Stages = make([]*DealStage, extra) + } + + for i := 0; i < int(extra); i++ { + + var v DealStage + if err := v.UnmarshalCBOR(cr); err != nil { + return err + } + + t.Stages[i] = &v + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DealStage) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{166}); err != nil { + return err + } + + // t.Logs ([]*legacytypes.Log) (slice) + if len("Logs") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Logs\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Logs"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Logs")); err != nil { + return err + } + + if len(t.Logs) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Logs was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Logs))); err != nil { + return err + } + for _, v := range t.Logs { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + + // t.Name (string) (string) + if len("Name") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Name\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Name"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Name")); err != nil { + return err + } + + if len(t.Name) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Name was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Name))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Name)); err != nil { + return err + } + + // 
t.CreatedTime (typegen.CborTime) (struct) + if len("CreatedTime") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CreatedTime\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("CreatedTime"))); err != nil { + return err + } + if _, err := cw.WriteString(string("CreatedTime")); err != nil { + return err + } + + if err := t.CreatedTime.MarshalCBOR(cw); err != nil { + return err + } + + // t.Description (string) (string) + if len("Description") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Description\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Description"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Description")); err != nil { + return err + } + + if len(t.Description) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Description was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Description))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Description)); err != nil { + return err + } + + // t.UpdatedTime (typegen.CborTime) (struct) + if len("UpdatedTime") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"UpdatedTime\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("UpdatedTime"))); err != nil { + return err + } + if _, err := cw.WriteString(string("UpdatedTime")); err != nil { + return err + } + + if err := t.UpdatedTime.MarshalCBOR(cw); err != nil { + return err + } + + // t.ExpectedDuration (string) (string) + if len("ExpectedDuration") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ExpectedDuration\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ExpectedDuration"))); err != nil { + return err + } + if _, err := cw.WriteString(string("ExpectedDuration")); err != nil { + return err + } + + if len(t.ExpectedDuration) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.ExpectedDuration was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.ExpectedDuration))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.ExpectedDuration)); err != nil { + return err + } + return nil +} + +func (t *DealStage) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealStage{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealStage: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Logs ([]*legacytypes.Log) (slice) + case "Logs": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Logs: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Logs = make([]*Log, extra) + } + + for i := 0; i < int(extra); i++ { + + var v Log + if err := v.UnmarshalCBOR(cr); err != nil { + return err + } + + t.Logs[i] = &v + } + + // t.Name (string) (string) + case "Name": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Name = 
string(sval) + } + // t.CreatedTime (typegen.CborTime) (struct) + case "CreatedTime": + + { + + if err := t.CreatedTime.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.CreatedTime: %w", err) + } + + } + // t.Description (string) (string) + case "Description": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Description = string(sval) + } + // t.UpdatedTime (typegen.CborTime) (struct) + case "UpdatedTime": + + { + + if err := t.UpdatedTime.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.UpdatedTime: %w", err) + } + + } + // t.ExpectedDuration (string) (string) + case "ExpectedDuration": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.ExpectedDuration = string(sval) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *Log) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.Log (string) (string) + if len("Log") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Log\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Log"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Log")); err != nil { + return err + } + + if len(t.Log) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Log was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Log))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Log)); err != nil { + return err + } + + // t.UpdatedTime (typegen.CborTime) (struct) + if len("UpdatedTime") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"UpdatedTime\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("UpdatedTime"))); err != nil { + return err + } + if _, err := cw.WriteString(string("UpdatedTime")); err != nil { + return err + } + + if err := t.UpdatedTime.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *Log) UnmarshalCBOR(r io.Reader) (err error) { + *t = Log{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("Log: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Log (string) (string) + case "Log": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Log = string(sval) + } + // t.UpdatedTime (typegen.CborTime) (struct) + case "UpdatedTime": + + { + + if err := t.UpdatedTime.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.UpdatedTime: %w", err) + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} diff --git a/storagemarket/types/mock_types/mocks.go b/storagemarket/types/mock_types/mocks.go index bbd01124b..e15cd4810 100644 --- a/storagemarket/types/mock_types/mocks.go +++ b/storagemarket/types/mock_types/mocks.go @@ -9,7 +9,6 @@ import ( 
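
[Editorial aside, not part of the patch: the block above adds map-encoded CBOR (un)marshalers for the legacytypes structs (StorageAsk, DataRef, ProviderDealState, DealStages, DealStage, Log). The sketch below is a minimal, hypothetical round trip through the generated StorageAsk methods. It assumes the `github.com/filecoin-project/boost/storagemarket/types/legacytypes` import path that appears later in this series, and the standard Filecoin constructors `address.NewIDAddress` and `big.NewInt`; the field values are placeholders.]

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/filecoin-project/boost/storagemarket/types/legacytypes"
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
)

func main() {
	miner, _ := address.NewIDAddress(1234)

	// Placeholder values; field types follow the generated comments above.
	ask := &legacytypes.StorageAsk{
		Miner:         miner,
		Price:         big.NewInt(0),
		VerifiedPrice: big.NewInt(0),
		MinPieceSize:  abi.PaddedPieceSize(256),
		MaxPieceSize:  abi.PaddedPieceSize(32 << 30),
		Timestamp:     abi.ChainEpoch(10101),
		Expiry:        abi.ChainEpoch(10101),
		SeqNo:         42,
	}

	// Serialize to CBOR using the generated map-encoded marshaler.
	var buf bytes.Buffer
	if err := ask.MarshalCBOR(&buf); err != nil {
		panic(err)
	}

	// Read it back; unknown map keys would simply be skipped by the
	// generated decoder's default branch.
	var out legacytypes.StorageAsk
	if err := out.UnmarshalCBOR(&buf); err != nil {
		panic(err)
	}
	fmt.Println(out.SeqNo) // 42
}
```

[Because the encoding is keyed by field name and the decoders ignore unrecognized keys (the `default:` branch calling `cbg.ScanForLinks`), records written by older or newer versions of these structs can still be decoded, which is presumably why the map encoding was chosen for these persisted legacy types.]
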
io "io" reflect "reflect" - storagemarket "github.com/filecoin-project/boost-gfm/storagemarket" types "github.com/filecoin-project/boost/storagemarket/types" abi "github.com/filecoin-project/go-state-types/abi" market "github.com/filecoin-project/go-state-types/builtin/v9/market" @@ -157,10 +156,10 @@ func (m *MockChainDealManager) EXPECT() *MockChainDealManagerMockRecorder { } // WaitForPublishDeals mocks base method. -func (m *MockChainDealManager) WaitForPublishDeals(arg0 context.Context, arg1 cid.Cid, arg2 market.DealProposal) (*storagemarket.PublishDealsWaitResult, error) { +func (m *MockChainDealManager) WaitForPublishDeals(arg0 context.Context, arg1 cid.Cid, arg2 market.DealProposal) (*types.PublishDealsWaitResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WaitForPublishDeals", arg0, arg1, arg2) - ret0, _ := ret[0].(*storagemarket.PublishDealsWaitResult) + ret0, _ := ret[0].(*types.PublishDealsWaitResult) ret1, _ := ret[1].(error) return ret0, ret1 } diff --git a/storagemarket/types/types.go b/storagemarket/types/types.go index 4fb369c85..c40ac08a3 100644 --- a/storagemarket/types/types.go +++ b/storagemarket/types/types.go @@ -7,7 +7,7 @@ import ( "io" "net/url" - "github.com/filecoin-project/boost-gfm/storagemarket" + "github.com/filecoin-project/boost/storagemarket/types/legacytypes" "github.com/filecoin-project/boost/transport/httptransport/util" "github.com/filecoin-project/boost/transport/types" "github.com/filecoin-project/go-address" @@ -18,9 +18,12 @@ import ( "github.com/filecoin-project/lotus/storage/sealer/storiface" "github.com/google/uuid" "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" "github.com/ipni/go-libipni/maurl" ) +var log = logging.Logger("boost-provider-types") + //go:generate cbor-gen-for --map-encoding StorageAsk DealParamsV120 DealParams Transfer DealResponse DealStatusRequest DealStatusResponse DealStatus //go:generate go run github.com/golang/mock/mockgen -destination=mock_types/mocks.go -package=mock_types . 
PieceAdder,CommpCalculator,DealPublisher,ChainDealManager,IndexProvider @@ -154,7 +157,7 @@ type DealPublisher interface { } type ChainDealManager interface { - WaitForPublishDeals(ctx context.Context, publishCid cid.Cid, proposal market.DealProposal) (*storagemarket.PublishDealsWaitResult, error) + WaitForPublishDeals(ctx context.Context, publishCid cid.Cid, proposal market.DealProposal) (*PublishDealsWaitResult, error) } type IndexProvider interface { @@ -164,9 +167,16 @@ type IndexProvider interface { } type AskGetter interface { - GetAsk() *storagemarket.SignedStorageAsk + GetAsk(miner address.Address) *legacytypes.SignedStorageAsk } type SignatureVerifier interface { VerifySignature(ctx context.Context, sig crypto.Signature, addr address.Address, input []byte) (bool, error) } + +// PublishDealsWaitResult is the result of a call to wait for publish deals to +// appear on chain +type PublishDealsWaitResult struct { + DealID abi.DealID + FinalCid cid.Cid +} From 3912bb497811909541e4c2c75c5d14b1ab26b636 Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Mon, 9 Oct 2023 19:01:33 +0400 Subject: [PATCH 02/34] make gen, docsgen --- api/proxy_gen.go | 182 ----- build/openrpc/boost.json.gz | Bin 5447 -> 2437 bytes documentation/en/api-v1-methods.md | 1031 ++++------------------------ 3 files changed, 121 insertions(+), 1092 deletions(-) diff --git a/api/proxy_gen.go b/api/proxy_gen.go index b63af95c9..4b36f1489 100644 --- a/api/proxy_gen.go +++ b/api/proxy_gen.go @@ -55,34 +55,6 @@ type BoostStruct struct { BoostOfflineDealWithData func(p0 context.Context, p1 uuid.UUID, p2 string, p3 bool) (*ProviderDealRejectionInfo, error) `perm:"admin"` - DealsConsiderOfflineRetrievalDeals func(p0 context.Context) (bool, error) `perm:"admin"` - - DealsConsiderOfflineStorageDeals func(p0 context.Context) (bool, error) `perm:"admin"` - - DealsConsiderOnlineRetrievalDeals func(p0 context.Context) (bool, error) `perm:"admin"` - - DealsConsiderOnlineStorageDeals func(p0 context.Context) (bool, error) `perm:"admin"` - - DealsConsiderUnverifiedStorageDeals func(p0 context.Context) (bool, error) `perm:"admin"` - - DealsConsiderVerifiedStorageDeals func(p0 context.Context) (bool, error) `perm:"admin"` - - DealsPieceCidBlocklist func(p0 context.Context) ([]cid.Cid, error) `perm:"admin"` - - DealsSetConsiderOfflineRetrievalDeals func(p0 context.Context, p1 bool) error `perm:"admin"` - - DealsSetConsiderOfflineStorageDeals func(p0 context.Context, p1 bool) error `perm:"admin"` - - DealsSetConsiderOnlineRetrievalDeals func(p0 context.Context, p1 bool) error `perm:"admin"` - - DealsSetConsiderOnlineStorageDeals func(p0 context.Context, p1 bool) error `perm:"admin"` - - DealsSetConsiderUnverifiedStorageDeals func(p0 context.Context, p1 bool) error `perm:"admin"` - - DealsSetConsiderVerifiedStorageDeals func(p0 context.Context, p1 bool) error `perm:"admin"` - - DealsSetPieceCidBlocklist func(p0 context.Context, p1 []cid.Cid) error `perm:"admin"` - OnlineBackup func(p0 context.Context, p1 string) error `perm:"admin"` PdBuildIndexForPieceCid func(p0 context.Context, p1 cid.Cid) error `perm:"admin"` @@ -346,160 +318,6 @@ func (s *BoostStub) BoostOfflineDealWithData(p0 context.Context, p1 uuid.UUID, p return nil, ErrNotSupported } -func (s *BoostStruct) DealsConsiderOfflineRetrievalDeals(p0 context.Context) (bool, error) { - if s.Internal.DealsConsiderOfflineRetrievalDeals == nil { - return false, ErrNotSupported - } - return s.Internal.DealsConsiderOfflineRetrievalDeals(p0) -} - -func (s *BoostStub) 
DealsConsiderOfflineRetrievalDeals(p0 context.Context) (bool, error) { - return false, ErrNotSupported -} - -func (s *BoostStruct) DealsConsiderOfflineStorageDeals(p0 context.Context) (bool, error) { - if s.Internal.DealsConsiderOfflineStorageDeals == nil { - return false, ErrNotSupported - } - return s.Internal.DealsConsiderOfflineStorageDeals(p0) -} - -func (s *BoostStub) DealsConsiderOfflineStorageDeals(p0 context.Context) (bool, error) { - return false, ErrNotSupported -} - -func (s *BoostStruct) DealsConsiderOnlineRetrievalDeals(p0 context.Context) (bool, error) { - if s.Internal.DealsConsiderOnlineRetrievalDeals == nil { - return false, ErrNotSupported - } - return s.Internal.DealsConsiderOnlineRetrievalDeals(p0) -} - -func (s *BoostStub) DealsConsiderOnlineRetrievalDeals(p0 context.Context) (bool, error) { - return false, ErrNotSupported -} - -func (s *BoostStruct) DealsConsiderOnlineStorageDeals(p0 context.Context) (bool, error) { - if s.Internal.DealsConsiderOnlineStorageDeals == nil { - return false, ErrNotSupported - } - return s.Internal.DealsConsiderOnlineStorageDeals(p0) -} - -func (s *BoostStub) DealsConsiderOnlineStorageDeals(p0 context.Context) (bool, error) { - return false, ErrNotSupported -} - -func (s *BoostStruct) DealsConsiderUnverifiedStorageDeals(p0 context.Context) (bool, error) { - if s.Internal.DealsConsiderUnverifiedStorageDeals == nil { - return false, ErrNotSupported - } - return s.Internal.DealsConsiderUnverifiedStorageDeals(p0) -} - -func (s *BoostStub) DealsConsiderUnverifiedStorageDeals(p0 context.Context) (bool, error) { - return false, ErrNotSupported -} - -func (s *BoostStruct) DealsConsiderVerifiedStorageDeals(p0 context.Context) (bool, error) { - if s.Internal.DealsConsiderVerifiedStorageDeals == nil { - return false, ErrNotSupported - } - return s.Internal.DealsConsiderVerifiedStorageDeals(p0) -} - -func (s *BoostStub) DealsConsiderVerifiedStorageDeals(p0 context.Context) (bool, error) { - return false, ErrNotSupported -} - -func (s *BoostStruct) DealsPieceCidBlocklist(p0 context.Context) ([]cid.Cid, error) { - if s.Internal.DealsPieceCidBlocklist == nil { - return *new([]cid.Cid), ErrNotSupported - } - return s.Internal.DealsPieceCidBlocklist(p0) -} - -func (s *BoostStub) DealsPieceCidBlocklist(p0 context.Context) ([]cid.Cid, error) { - return *new([]cid.Cid), ErrNotSupported -} - -func (s *BoostStruct) DealsSetConsiderOfflineRetrievalDeals(p0 context.Context, p1 bool) error { - if s.Internal.DealsSetConsiderOfflineRetrievalDeals == nil { - return ErrNotSupported - } - return s.Internal.DealsSetConsiderOfflineRetrievalDeals(p0, p1) -} - -func (s *BoostStub) DealsSetConsiderOfflineRetrievalDeals(p0 context.Context, p1 bool) error { - return ErrNotSupported -} - -func (s *BoostStruct) DealsSetConsiderOfflineStorageDeals(p0 context.Context, p1 bool) error { - if s.Internal.DealsSetConsiderOfflineStorageDeals == nil { - return ErrNotSupported - } - return s.Internal.DealsSetConsiderOfflineStorageDeals(p0, p1) -} - -func (s *BoostStub) DealsSetConsiderOfflineStorageDeals(p0 context.Context, p1 bool) error { - return ErrNotSupported -} - -func (s *BoostStruct) DealsSetConsiderOnlineRetrievalDeals(p0 context.Context, p1 bool) error { - if s.Internal.DealsSetConsiderOnlineRetrievalDeals == nil { - return ErrNotSupported - } - return s.Internal.DealsSetConsiderOnlineRetrievalDeals(p0, p1) -} - -func (s *BoostStub) DealsSetConsiderOnlineRetrievalDeals(p0 context.Context, p1 bool) error { - return ErrNotSupported -} - -func (s *BoostStruct) 
DealsSetConsiderOnlineStorageDeals(p0 context.Context, p1 bool) error { - if s.Internal.DealsSetConsiderOnlineStorageDeals == nil { - return ErrNotSupported - } - return s.Internal.DealsSetConsiderOnlineStorageDeals(p0, p1) -} - -func (s *BoostStub) DealsSetConsiderOnlineStorageDeals(p0 context.Context, p1 bool) error { - return ErrNotSupported -} - -func (s *BoostStruct) DealsSetConsiderUnverifiedStorageDeals(p0 context.Context, p1 bool) error { - if s.Internal.DealsSetConsiderUnverifiedStorageDeals == nil { - return ErrNotSupported - } - return s.Internal.DealsSetConsiderUnverifiedStorageDeals(p0, p1) -} - -func (s *BoostStub) DealsSetConsiderUnverifiedStorageDeals(p0 context.Context, p1 bool) error { - return ErrNotSupported -} - -func (s *BoostStruct) DealsSetConsiderVerifiedStorageDeals(p0 context.Context, p1 bool) error { - if s.Internal.DealsSetConsiderVerifiedStorageDeals == nil { - return ErrNotSupported - } - return s.Internal.DealsSetConsiderVerifiedStorageDeals(p0, p1) -} - -func (s *BoostStub) DealsSetConsiderVerifiedStorageDeals(p0 context.Context, p1 bool) error { - return ErrNotSupported -} - -func (s *BoostStruct) DealsSetPieceCidBlocklist(p0 context.Context, p1 []cid.Cid) error { - if s.Internal.DealsSetPieceCidBlocklist == nil { - return ErrNotSupported - } - return s.Internal.DealsSetPieceCidBlocklist(p0, p1) -} - -func (s *BoostStub) DealsSetPieceCidBlocklist(p0 context.Context, p1 []cid.Cid) error { - return ErrNotSupported -} - func (s *BoostStruct) OnlineBackup(p0 context.Context, p1 string) error { if s.Internal.OnlineBackup == nil { return ErrNotSupported diff --git a/build/openrpc/boost.json.gz b/build/openrpc/boost.json.gz index a52465902230ff013356ec866b616f00ba60d677..ac0cd7b57c383501a1549dc4106e1d6144f165f4 100644 GIT binary patch literal 2437 zcmV;033~P)iwFP!00000|Lk0QQ`@)}|0){Yf1qPR9xU5thCg_v%?O+Svf{>Im+{VpXqgLM#b_0(s?136`N8K$mUGJ zovQ6e|69m!Ae%b1ed?^Ewe!pVNP&ew2rkyq{RO*Fbbw^7!xRWc+*w}G&JJSKjpkh_ z0V)s-y<+e!rnGLMLm@tL)eC%|3k8mzIo7}8$$6yL{JQ3C{eq1z*wxh)wy+F5kBAX& zeXR?bL`LK=%a}Yrv^sFI~LoGMBBcAUJC``?KTr1ecY!|hRvw6;~h zC2zp>H;P!yH67)I&GH{g^bbLpVj%8#!8P2(;)S2+#%&mXv?G0`A#}NLz zvx8UV&oHK?7pe9Ex24{Bmd|%o4TC-s;Bf`WrR)Y=r1a?btQ3pG zieA1PS}0S6aYa)!V@rKyK#94`2bBsXeomC2EbB9)u_YJ80u)x`N+Q z2ZUbOQ)EfLl5Al{1xDRcn2=)l$?;%v_+p`K{rBkKhT&K8PlvZuLA>=UjxOWZE-mbl zyG}&3v29_Gvec_&J`sY99$<_4x1ZsA;nd}VyvKU}SwJ1!E+$k9?+efe*j0K-z24Xe ze@^T5cKGu}PjM-*PqB11snD|2m0^FfI=&x7)JN)M!nUv@Sh+w!aJd< z#`2FC!)-jMH=3<2Y~e(1>9J$d2RD*hPZ6OGeQpmm*+#u#;X~%k4)hckigVvi2|WH3EzkqnHwG_<2`!KNMua+_%zh2riBlPBDkGR24yx#LFjp!J^f&+oq9eSiPu?^`(fOZo4%ccnP{W6%D4ceMAm*F2Jk z{N3rD_;7st{x3DaR;VzV>IvuS@oulf`aEEc2H7rAT6~d5L$|~Yv@V|t!sI~TQzDW1 z8PV~=qs_h3jS(m-Itxb{v(?nH9}>#+@idL!wzLj)gT71UFjQ0)xUPkJU@IO9@QsB( z4F(caqHxE+uGKJkwLb*=#^*ZvZa2~|7a7i>5Lxz5K!rw30>!a~Kkkhb$P=)E-fA3q zW8t?%Dm{zB?Nrxy8RG%7;gol}A3M>gh8yBIAv8zE2|`f~1+m-pQ$zO|68AC0O&t8Z z)C@QquZUldX9ZJk{%*KJM`}dN&>;#o6!l=bDh`9jbNX-O+lWymgkvz(5~~x&JaB0E zN1rkxMu*Jijy{4mDhb=6abyk`8NhPkP1o|i4tzqKe8_2+F3N0|1-h~nrg&RKTVm`k zyGhy~ElfQ!caictMfjWgo6;;Ad`VdvX?IqHTeE8`yX&kc&KhG@O-+hB>%Q&{CO-{K zilW54Gb?xX)7a@gi8SXE6?0shX*4kd9MI!)uOWC-KXQ-l8}-P5jC%i3Rp~o(%oa zseZG$B-P&=X+IB6;yk0$)hgM)fztAl-~-D6hCLs*LSQ~rRYd&NiilO=qAFZeg^Q|i zQ57z#!bMfMs0tTV;i4*BRE3MGa8VU5o~UrqYAq^U1fDlq^@9q@+PvaKlD>s;NO;Zb zXzf)xnL7b}4$4`F4fygpMY7=|Rwhzsc=B3ARvu;Y*t0PDFPO&~Ql=_vecr6~X>!SD zNEJ`Oa_-e}G2@n~DXK8O)wwO2}YF4jki+rj6 z9I3|E)`C={!yLE+k(kb|>*^aU`#p&TlniOedX!S+W)wM;%=tyh)FPkD=l&9>$>Kf+ zZZ_wO2UE?_SUH(~nWlvdsAn<3lZ1Yn~3>sx{C+1m2e9PXqot-(K^7vo+ocBm8I;j_m 
zeXn0Hz`hUU23G7LCQF~Ig_!L3cpb#l391HS)j+Hoh*blzY9Lk(#HxWCMmH8Djtd D_<*#; literal 5447 zcmV-N6}ajjiwFP!00000|Li?!bKADIe}#khCCMnVHp|OQKiJNuTPO9ilHPlpcwQh9 zl2B71LqLudkN*1`fQz^QBuG(?qq5B;5(OL{0B6U+!2@tyL_F67r=Z*JwTGYuFqt~w z^Z_u;Yy?ihx#Lh~Twh%nXICT80(Z!x*dgE)^xEBa=cQ+M!MX(&h|L{~g453rfWQUs z`3Bp_bTDb3nauIVNc@d|AkYF9qNayk<^%fr_3O;}LZ(Y%8qcWlLcDU!yrs#W8>!CE z+>T*7grR%ZzVQET8E)5rmgONz+h_j2&y8nrg4>&ZEdzO;<2^SXzK|~rPYv205r#Yh z?KFPIZ@(D?+kVS0%tLJH5#t3x-%~K3w~W`G_rYP~rRzE#Ll*C`=6?m9)GvAFb~xV8 zzmWA8^7ZRi&;oS1SU_(DPQk}H@{j>NWDtiLE5r;)R)*;;7KkuvOdZc)b4(3?(%YZ~ zTP8#SQE?gUNcwpM;_WA>G z)8gI5>!5w*IWF=T<}Iesrlfj zIG6_P_tNzoKIsP&&Q=E#+nF3JAZ5rqfG$38J?DP)e=|hdGv|LUd&6U}UbnJ5d~Q4D zEoF{}-Xd0xgA-j<{A*&XeSxj##EZuKFR;?gv(PmM=|X(}dcF}H*n zBMT9Rr&vPd7uYg9PJ>in3=N9x=}W)O1e2M+PK1TB6ln*Y=3z41LLT`vmYLh=v4#l!FR?p+jnC&M`PUr%?{B|>s`x!ok#cmR zAyUd42AS@~q6HJCEmzc*K&P@mDOCK8l-N%xfk{?p`pX{9i&wS z$U8`jg~!BkY(44m4pO<~@Mp7<^zcz^B*#BZa&W54%xv(m*tXzriebb1AxM5{Nzm_f z6_S8=eg$WOU0xx|JZCkYL(eKH6;xnU62f2N=e}f+3I-KKxsWI!9dc0JxuggvTg$@r z+ZuP!UZTp@t&BT9EbT0J1P@~}E7F6u{cF>Hx2Hk-Z!fl>__r4lIu~Af*g{a)wOEN< zb(n;j>!wRUm-E^)BiNes<-VQ-QHkp(Z&l1EpC7>QXeAWT3=R<-Pz1jrbHRQbVR+l` zTXah2c-T9FlOF>-7!D7u>GFry|2CzA$pTtK>%Nci%=!jeV7xRuWIYh+u}HjenqRbyo^fJ?bk3v{nt4MYvI7`>4bQhZ=lvi769gP}ZrF1JP1y zBQkZgRoVu&zG~wysIJ%&qj?Mk$I&fPZ$Xk{s z?7yBn9BT0Q6-FkaoE0G^n9L%Nd<%A0!c^(LMHB{znhL29L_xG=f%3(YkJr;i)Vy)(W3f0Ee-$z^jNR>{Kq=Xsf0JbOo7@WGOKL)a9 z@=qe$(EmQt|FJ6fL68E1*~!o#cLZ$}buX9LYJd7PdiC6R_JS_>MAG(^=iFfndAy@= zONtF!1Z_FB6Kt2aSAB$cPT|qu^@zN4lr(ZVc50ydU{MyTcY(U=sUQml3O8 z{S4l@O^YY;0r2*pme>NPdXw4$7armRIAgp^r_+5Y{(S6oPQ{-02`9+* zXKl=&$6mXRIp<)zoo)-fCRXynm^mJtp(_uY=n8qkkDwESe&N_QWXR)~h#!1JQjgc+ z^D&+g=rhv;_$a8V*8;B~gWxoqbGBZ$z=-}ao!Xe7%}5a>OV{rcAN&%av$~9UG_!-Ll`$-G@F`ZJxLV~+QWX9&jdp$PCVBfa1y9^bO*b>lWB* zw1u5AugeO$vJf`%R)<;w*Pt(;ObW|D2p$mMz;(8ugq4Jb%Fz9*z5=h1Q%Ugy6B=4*@ zLLeVhlkxmd%Xn@A7fs-z30yRRizaZ<1TLDuMH9Ga0vAo-q6u6yfr}<^v7^96H}^p0 zahl$47*R!$;?L?KlYQb z{x$triObY@t&i)q?#7pV1XuAIEu1^_iBJS>WL-G!DqO4aEq1xH6uM58{I13~X?&B$ zH~EEplN$VpPyPb`JU^nq(3gokS-Y$Nl(lNi=;u{uNG;VRWQdlnf~tfgKbx+522~0&=iF)KQ>QB2cXAS4bpW&3 zt^bL;^-q*@2ZOSl`;IZUF7D;9RPn9PH{ofs?bw;WD9i>hkNlPBf zNfUF8XTcjm)CeM7L3Ef`*e2k$OH5fbsb~x1HU&thuDYaTgRVSEy)Cz{g{-PZRne3k zWs?xN&zvnU$04sZl2#{GE0TmINfJ6`_ti=Es>sD)m{;`YJ-kH~_YlLbkF|vuzOTL= z#MlU$4a8;xvDrXuHV~T)#AXAr*+6VI5StCeW&^R=KrG{C9Oc#03mA|u+rKfJ3q4%k z?#V5Bv**kD_|wy>L6DM*qzX; z9Q}r1ABA9hii2j7C8|rfW`Na3tUDL2GSr_!VrWCxX}az!vYHd?`H9>GvC^7oTMn=W z`j!LU1&u3*&4r?=Q*)Rf7VO3GH^8ztqklgNLGPm<+8BDMNSPHxJw~kVKw%L`)lQGU zp`$!2rUiT3Lu0xF*m;)O&)XaLhaE~gJW^y;Do9~V6D=QLy{u7er)|v`DdZcr*c-Mu zR%8KW2%(#*(QjB|Z&>3bZ`(CP2{kGVok(kSw%Dnl`8>55TBD94tYhsNuf!gZqp&UZPB74Iky}YgD7%kKk3od|I6MN`~gsU@$0{wN<>Gx3pqhE_*|a+W!OGK!H6cn{B#4%jjXsF4it$1@1b{# z*aaje66c(v<2$89g$jyFQdp2d7PM)Hjf1laE<#>geC`*iRn=>^l_fdDd9;%ViYY3t z{ZQs)K};z1ZzKl|wmvx!ET-Pkfl3$O?AZ~6f0|r6ROEU1Ech0&GkRM^7WqBpTjVdO zZxSy=%eH^FTVN)i(c8R@^EmKY4n3u?-O0vy!FVNimld_wI>172{VEICL_w8-&?SjK zdyh$`RKk0BpM8c&vMRzyyg-z}g}bSQaPMM{R9FWAef!`9$NI}wFerJf#vf{l9wDka zhf5hi9pT$eQHWgd=*=^;q{5-#N=cLuyxH4WA4^-6KXM_pEOCMKw&blVTnaIDBb z@mXZF;Jg$T@2Dafk`M}N7{(6S7rLuS!1cJQ8Dv8@wJArqujL4z)`9)TmAxFEC{llX z7QMuj#bZe&H>VUpR5{@BMi+BP2+BRLpn#s65H5K|spN3=h<216E% z9C=qr^r?6WbvOlrf4QkPW^m@4TH&Y958#8pB&pZGl8CTPVL$&4QSJ_^Z)yJITE&en zotcltr8CREM+3zAW_`0x?Y@*SIb*a?u9#S2&84@7Mie0XHClE_fvwoe%P!iC*097d zH=UxwXJyKP#h1IRk(?kT$#{GUsD_vz8VSq<-E1FrNSAQ)Ip(q7D-DPQQmxFLt7Y$K z^>iWG*QS5K@;HjEOC%H&7}jYU9_x7RZ}Qt!w$JQOdg zXnbTxwN=QbiGxn!;GV7@!}lP8??YR{4TPQVr8b!?jW|| z#71u%FEe1yNt|U52b{lzDTE!8Z`Gz;mXV;(n7vqa;ja;O9p9*|3PQL;(S7ZH9s)Q$ ziEg>SIJ?f4wX14fCzz?Mg{@c>nKiwsOr3%9icGk?Iabr 
zR*y>M;*(a3Mj7(7Qq>}@nAxRAije7+PnQ`XB^Fdyfym-uaYBkrmWq|!_bX7)RU9(g z_`us7Lsmgig$dI60`WS@g4FZ^20Dk4gLpyV8!JW*G;1|JZV5b%;YuP7I(_tF4{&8W z(5iQ@gS$dp(%C}RalqH`yUIh4?1{h`%O;tWO(~J3rEvX>_*?kTp}2eX^n5Nc9ir;^ zPJ%(cya;dU+W4%#R^s1JTHufAlp+>}i!KqFvAO?keG>~Pclf|fgzvW6e{vYK$HHLd zD-*a80%{l`P=*sOmY$cPi%T?v=IY9Gm}5G2c;{19?=(RvPh2rdCPd4*OHO%d&rNfr z>2)c)>12V*1Nb(+_lExBKAL0@sO-ZyZs5aBCd z*&&Gcm%9Qsba6Y@tmy8cEZiKcUk1E_nV%ZPPId-msAgtd(Mb3H)G*dl%&TW~t;ME{jcU*Aeg|DyX8O&q<6qd)mLx|T(mlmc3lFA*{iVCr}a z$owOtRt6WqR-LrjDgY#HuAJi3CGg{;U5 zXc_LI?0KQo8s`*6C@ZLm7^>>m3MXB0JTuqX3Ev9#qf- zDTH@k%&#eFuHh?>KUP!}y0Xrf*tSN*LicYRFG?*f#Q_Q+1;s;hxIqDMTgFi1`P6u%DKYXB zON=z~;_~Q3QD*3RNf=(B@p3{}RBXPLk`$TlIeX;dz{#d%-X3`dY|$2Z3U;Y2a@iQ< zvLlO@4~-gZjtsha%;t~@8G{I%*!=C7_+kYU${;aO`wOXKzdbc>`F?x0kwy+I@ckAF z8!=JSnM;xU3YvaZoIkE>#E zbNDF-cqz6@j$$V%CpoBQW<0;Gj#GSc-05`0r%g^?x6_Fy1`m!8kA|HV2-8Qz_q8B+ x7cxFER|y}t{i15p!>&>YL)_~~Hr%hh+T-pqSg&u^{~rJV|Nln*d~MPJ0RUT^i1Ppd diff --git a/documentation/en/api-v1-methods.md b/documentation/en/api-v1-methods.md index 89136ca40..efe7f92de 100644 --- a/documentation/en/api-v1-methods.md +++ b/documentation/en/api-v1-methods.md @@ -1,8 +1,6 @@ # Groups * [](#) * [Discover](#discover) -* [Actor](#actor) - * [ActorSectorSize](#actorsectorsize) * [Auth](#auth) * [AuthNew](#authnew) * [AuthVerify](#authverify) @@ -11,14 +9,6 @@ * [BlockstoreGetSize](#blockstoregetsize) * [BlockstoreHas](#blockstorehas) * [Boost](#boost) - * [BoostDagstoreDestroyShard](#boostdagstoredestroyshard) - * [BoostDagstoreGC](#boostdagstoregc) - * [BoostDagstoreInitializeAll](#boostdagstoreinitializeall) - * [BoostDagstoreInitializeShard](#boostdagstoreinitializeshard) - * [BoostDagstoreListShards](#boostdagstorelistshards) - * [BoostDagstorePiecesContainingMultihash](#boostdagstorepiecescontainingmultihash) - * [BoostDagstoreRecoverShard](#boostdagstorerecovershard) - * [BoostDagstoreRegisterShard](#boostdagstoreregistershard) * [BoostDeal](#boostdeal) * [BoostDealBySignedProposalCid](#boostdealbysignedproposalcid) * [BoostDummyDeal](#boostdummydeal) @@ -28,39 +18,11 @@ * [BoostIndexerListMultihashes](#boostindexerlistmultihashes) * [BoostMakeDeal](#boostmakedeal) * [BoostOfflineDealWithData](#boostofflinedealwithdata) -* [Deals](#deals) - * [DealsConsiderOfflineRetrievalDeals](#dealsconsiderofflineretrievaldeals) - * [DealsConsiderOfflineStorageDeals](#dealsconsiderofflinestoragedeals) - * [DealsConsiderOnlineRetrievalDeals](#dealsconsideronlineretrievaldeals) - * [DealsConsiderOnlineStorageDeals](#dealsconsideronlinestoragedeals) - * [DealsConsiderUnverifiedStorageDeals](#dealsconsiderunverifiedstoragedeals) - * [DealsConsiderVerifiedStorageDeals](#dealsconsiderverifiedstoragedeals) - * [DealsPieceCidBlocklist](#dealspiececidblocklist) - * [DealsSetConsiderOfflineRetrievalDeals](#dealssetconsiderofflineretrievaldeals) - * [DealsSetConsiderOfflineStorageDeals](#dealssetconsiderofflinestoragedeals) - * [DealsSetConsiderOnlineRetrievalDeals](#dealssetconsideronlineretrievaldeals) - * [DealsSetConsiderOnlineStorageDeals](#dealssetconsideronlinestoragedeals) - * [DealsSetConsiderUnverifiedStorageDeals](#dealssetconsiderunverifiedstoragedeals) - * [DealsSetConsiderVerifiedStorageDeals](#dealssetconsiderverifiedstoragedeals) - * [DealsSetPieceCidBlocklist](#dealssetpiececidblocklist) * [I](#i) * [ID](#id) * [Log](#log) * [LogList](#loglist) * [LogSetLevel](#logsetlevel) -* [Market](#market) - * [MarketCancelDataTransfer](#marketcanceldatatransfer) - * [MarketDataTransferUpdates](#marketdatatransferupdates) - * [MarketGetAsk](#marketgetask) - 
* [MarketGetRetrievalAsk](#marketgetretrievalask) - * [MarketImportDealData](#marketimportdealdata) - * [MarketListDataTransfers](#marketlistdatatransfers) - * [MarketListIncompleteDeals](#marketlistincompletedeals) - * [MarketListRetrievalDeals](#marketlistretrievaldeals) - * [MarketPendingDeals](#marketpendingdeals) - * [MarketRestartDataTransfer](#marketrestartdatatransfer) - * [MarketSetAsk](#marketsetask) - * [MarketSetRetrievalAsk](#marketsetretrievalask) * [Net](#net) * [NetAddrsListen](#netaddrslisten) * [NetAgentVersion](#netagentversion) @@ -89,10 +51,6 @@ * [OnlineBackup](#onlinebackup) * [Pd](#pd) * [PdBuildIndexForPieceCid](#pdbuildindexforpiececid) -* [Runtime](#runtime) - * [RuntimeSubsystems](#runtimesubsystems) -* [Sectors](#sectors) - * [SectorsRefs](#sectorsrefs) ## @@ -115,23 +73,6 @@ Response: } ``` -## Actor - - -### ActorSectorSize -There are not yet any comments for this method. - -Perms: read - -Inputs: -```json -[ - "f01234" -] -``` - -Response: `34359738368` - ## Auth @@ -224,146 +165,6 @@ Response: `true` ## Boost -### BoostDagstoreDestroyShard - - -Perms: admin - -Inputs: -```json -[ - "string value" -] -``` - -Response: `{}` - -### BoostDagstoreGC - - -Perms: admin - -Inputs: `null` - -Response: -```json -[ - { - "Key": "baga6ea4seaqecmtz7iak33dsfshi627abz4i4665dfuzr3qfs4bmad6dx3iigdq", - "Success": false, - "Error": "\u003cerror\u003e" - } -] -``` - -### BoostDagstoreInitializeAll - - -Perms: admin - -Inputs: -```json -[ - { - "MaxConcurrency": 123, - "IncludeSealed": true - } -] -``` - -Response: -```json -{ - "Key": "string value", - "Event": "string value", - "Success": true, - "Error": "string value", - "Total": 123, - "Current": 123 -} -``` - -### BoostDagstoreInitializeShard - - -Perms: admin - -Inputs: -```json -[ - "string value" -] -``` - -Response: `{}` - -### BoostDagstoreListShards - - -Perms: admin - -Inputs: `null` - -Response: -```json -[ - { - "Key": "baga6ea4seaqecmtz7iak33dsfshi627abz4i4665dfuzr3qfs4bmad6dx3iigdq", - "State": "ShardStateAvailable", - "Error": "\u003cerror\u003e" - } -] -``` - -### BoostDagstorePiecesContainingMultihash - - -Perms: read - -Inputs: -```json -[ - "Bw==" -] -``` - -Response: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -### BoostDagstoreRecoverShard - - -Perms: admin - -Inputs: -```json -[ - "string value" -] -``` - -Response: `{}` - -### BoostDagstoreRegisterShard - - -Perms: admin - -Inputs: -```json -[ - "string value" -] -``` - -Response: `{}` - ### BoostDeal @@ -507,707 +308,129 @@ Inputs: ```json [ { - "DealUUID": "07070707-0707-0707-0707-070707070707", - "IsOffline": true, - "ClientDealProposal": { - "Proposal": { - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1032, - "VerifiedDeal": true, - "Client": "f01234", - "Provider": "f01234", - "Label": "", - "StartEpoch": 10101, - "EndEpoch": 10101, - "StoragePricePerEpoch": "0", - "ProviderCollateral": "0", - "ClientCollateral": "0" - }, - "ClientSignature": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - } - }, - "DealDataRoot": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Transfer": { - "Type": "string value", - "ClientID": "string value", - "Params": "Ynl0ZSBhcnJheQ==", - "Size": 42 - }, - "RemoveUnsealedCopy": true, - "SkipIPNIAnnounce": true - } -] -``` - -Response: -```json -{ - "Accepted": true, - "Reason": "string value" -} -``` - -### BoostIndexerAnnounceAllDeals -There are not yet any comments for 
this method. - -Perms: admin - -Inputs: `null` - -Response: `{}` - -### BoostIndexerAnnounceLatest - - -Perms: admin - -Inputs: `null` - -Response: -```json -{ - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" -} -``` - -### BoostIndexerAnnounceLatestHttp - - -Perms: admin - -Inputs: -```json -[ - [ - "string value" - ] -] -``` - -Response: -```json -{ - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" -} -``` - -### BoostIndexerListMultihashes - - -Perms: admin - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: -```json -[ - "Bw==" -] -``` - -### BoostMakeDeal - - -Perms: write - -Inputs: -```json -[ - { - "DealUUID": "07070707-0707-0707-0707-070707070707", - "IsOffline": true, - "ClientDealProposal": { - "Proposal": { - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1032, - "VerifiedDeal": true, - "Client": "f01234", - "Provider": "f01234", - "Label": "", - "StartEpoch": 10101, - "EndEpoch": 10101, - "StoragePricePerEpoch": "0", - "ProviderCollateral": "0", - "ClientCollateral": "0" - }, - "ClientSignature": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - } - }, - "DealDataRoot": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Transfer": { - "Type": "string value", - "ClientID": "string value", - "Params": "Ynl0ZSBhcnJheQ==", - "Size": 42 - }, - "RemoveUnsealedCopy": true, - "SkipIPNIAnnounce": true - } -] -``` - -Response: -```json -{ - "Accepted": true, - "Reason": "string value" -} -``` - -### BoostOfflineDealWithData - - -Perms: admin - -Inputs: -```json -[ - "07070707-0707-0707-0707-070707070707", - "string value", - true -] -``` - -Response: -```json -{ - "Accepted": true, - "Reason": "string value" -} -``` - -## Deals - - -### DealsConsiderOfflineRetrievalDeals - - -Perms: admin - -Inputs: `null` - -Response: `true` - -### DealsConsiderOfflineStorageDeals - - -Perms: admin - -Inputs: `null` - -Response: `true` - -### DealsConsiderOnlineRetrievalDeals - - -Perms: admin - -Inputs: `null` - -Response: `true` - -### DealsConsiderOnlineStorageDeals -There are not yet any comments for this method. 
- -Perms: admin - -Inputs: `null` - -Response: `true` - -### DealsConsiderUnverifiedStorageDeals - - -Perms: admin - -Inputs: `null` - -Response: `true` - -### DealsConsiderVerifiedStorageDeals - - -Perms: admin - -Inputs: `null` - -Response: `true` - -### DealsPieceCidBlocklist - - -Perms: admin - -Inputs: `null` - -Response: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -### DealsSetConsiderOfflineRetrievalDeals - - -Perms: admin - -Inputs: -```json -[ - true -] -``` - -Response: `{}` - -### DealsSetConsiderOfflineStorageDeals - - -Perms: admin - -Inputs: -```json -[ - true -] -``` - -Response: `{}` - -### DealsSetConsiderOnlineRetrievalDeals - - -Perms: admin - -Inputs: -```json -[ - true -] -``` - -Response: `{}` - -### DealsSetConsiderOnlineStorageDeals - - -Perms: admin - -Inputs: -```json -[ - true -] -``` - -Response: `{}` - -### DealsSetConsiderUnverifiedStorageDeals - - -Perms: admin - -Inputs: -```json -[ - true -] -``` - -Response: `{}` - -### DealsSetConsiderVerifiedStorageDeals - - -Perms: admin - -Inputs: -```json -[ - true -] -``` - -Response: `{}` - -### DealsSetPieceCidBlocklist - - -Perms: admin - -Inputs: -```json -[ - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - ] -] -``` - -Response: `{}` - -## I - - -### ID - - -Perms: read - -Inputs: `null` - -Response: `"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"` - -## Log - - -### LogList - - -Perms: write - -Inputs: `null` - -Response: -```json -[ - "string value" -] -``` - -### LogSetLevel - - -Perms: write - -Inputs: -```json -[ - "string value", - "string value" -] -``` - -Response: `{}` - -## Market - - -### MarketCancelDataTransfer - - -Perms: write - -Inputs: -```json -[ - 3, - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - true + "DealUUID": "07070707-0707-0707-0707-070707070707", + "IsOffline": true, + "ClientDealProposal": { + "Proposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "ClientSignature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + }, + "DealDataRoot": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Transfer": { + "Type": "string value", + "ClientID": "string value", + "Params": "Ynl0ZSBhcnJheQ==", + "Size": 42 + }, + "RemoveUnsealedCopy": true, + "SkipIPNIAnnounce": true + } ] ``` -Response: `{}` - -### MarketDataTransferUpdates - - -Perms: write - -Inputs: `null` - Response: ```json { - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "IsInitiator": true, - "IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42, - "Stages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - } + "Accepted": true, + "Reason": "string value" } ``` -### MarketGetAsk - +### BoostIndexerAnnounceAllDeals +There are not yet any comments for this method. 
-Perms: read +Perms: admin Inputs: `null` -Response: -```json -{ - "Ask": { - "Price": "0", - "VerifiedPrice": "0", - "MinPieceSize": 1032, - "MaxPieceSize": 1032, - "Miner": "f01234", - "Timestamp": 10101, - "Expiry": 10101, - "SeqNo": 42 - }, - "Signature": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - } -} -``` +Response: `{}` -### MarketGetRetrievalAsk +### BoostIndexerAnnounceLatest -Perms: read +Perms: admin Inputs: `null` Response: ```json { - "PricePerByte": "0", - "UnsealPrice": "0", - "PaymentInterval": 42, - "PaymentIntervalIncrease": 42 + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" } ``` -### MarketImportDealData +### BoostIndexerAnnounceLatestHttp -Perms: write +Perms: admin Inputs: ```json [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "string value" + [ + "string value" + ] ] ``` -Response: `{}` - -### MarketListDataTransfers - - -Perms: write - -Inputs: `null` - Response: ```json -[ - { - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "IsInitiator": true, - "IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42, - "Stages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - } - } -] +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} ``` -### MarketListIncompleteDeals - +### BoostIndexerListMultihashes -Perms: read -Inputs: `null` +Perms: admin -Response: +Inputs: ```json [ { - "Proposal": { - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1032, - "VerifiedDeal": true, - "Client": "f01234", - "Provider": "f01234", - "Label": "", - "StartEpoch": 10101, - "EndEpoch": 10101, - "StoragePricePerEpoch": "0", - "ProviderCollateral": "0", - "ClientCollateral": "0" - }, - "ClientSignature": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - }, - "ProposalCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "AddFundsCid": null, - "PublishCid": null, - "Miner": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Client": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "State": 42, - "PiecePath": ".lotusminer/fstmp123", - "MetadataPath": ".lotusminer/fstmp123", - "SlashEpoch": 10101, - "FastRetrieval": true, - "Message": "string value", - "FundsReserved": "0", - "Ref": { - "TransferType": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceCid": null, - "PieceSize": 1024, - "RawBlockSize": 42 - }, - "AvailableForRetrieval": true, - "DealID": 5432, - "CreationTime": "0001-01-01T00:00:00Z", - "TransferChannelId": { - "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "ID": 3 - }, - "SectorNumber": 9, - "InboundCAR": "string value" + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" } ] ``` -### MarketListRetrievalDeals -There are not yet any comments for this method. 
- -Perms: read - -Inputs: `null` - Response: ```json [ - { - "PayloadCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "ID": 5, - "Selector": { - "Raw": "Ynl0ZSBhcnJheQ==" - }, - "PieceCID": null, - "PricePerByte": "0", - "PaymentInterval": 42, - "PaymentIntervalIncrease": 42, - "UnsealPrice": "0", - "StoreID": 42, - "ChannelID": { - "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "ID": 3 - }, - "PieceInfo": { - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Deals": [ - { - "DealID": 5432, - "SectorID": 9, - "Offset": 1032, - "Length": 1032 - } - ] - }, - "Status": 0, - "Receiver": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "TotalSent": 42, - "FundsReceived": "0", - "Message": "string value", - "CurrentInterval": 42, - "LegacyProtocol": true - } + "Bw==" ] ``` -### MarketPendingDeals +### BoostMakeDeal Perms: write -Inputs: `null` - -Response: +Inputs: ```json -{ - "Deals": [ - { +[ + { + "DealUUID": "07070707-0707-0707-0707-070707070707", + "IsOffline": true, + "ClientDealProposal": { "Proposal": { "PieceCID": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" @@ -1227,61 +450,91 @@ Response: "Type": 2, "Data": "Ynl0ZSBhcnJheQ==" } - } - ], - "PublishPeriodStart": "0001-01-01T00:00:00Z", - "PublishPeriod": 60000000000 + }, + "DealDataRoot": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Transfer": { + "Type": "string value", + "ClientID": "string value", + "Params": "Ynl0ZSBhcnJheQ==", + "Size": 42 + }, + "RemoveUnsealedCopy": true, + "SkipIPNIAnnounce": true + } +] +``` + +Response: +```json +{ + "Accepted": true, + "Reason": "string value" } ``` -### MarketRestartDataTransfer +### BoostOfflineDealWithData -Perms: write +Perms: admin Inputs: ```json [ - 3, - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "07070707-0707-0707-0707-070707070707", + "string value", true ] ``` -Response: `{}` +Response: +```json +{ + "Accepted": true, + "Reason": "string value" +} +``` -### MarketSetAsk +## I -Perms: admin +### ID -Inputs: + +Perms: read + +Inputs: `null` + +Response: `"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"` + +## Log + + +### LogList + + +Perms: write + +Inputs: `null` + +Response: ```json [ - "0", - "0", - 10101, - 1032, - 1032 + "string value" ] ``` -Response: `{}` - -### MarketSetRetrievalAsk +### LogSetLevel -Perms: admin +Perms: write Inputs: ```json [ - { - "PricePerByte": "0", - "UnsealPrice": "0", - "PaymentInterval": 42, - "PaymentIntervalIncrease": 42 - } + "string value", + "string value" ] ``` @@ -1827,45 +1080,3 @@ Inputs: Response: `{}` -## Runtime - - -### RuntimeSubsystems -RuntimeSubsystems returns the subsystems that are enabled -in this instance. 
- - -Perms: read - -Inputs: `null` - -Response: -```json -[ - "Markets" -] -``` - -## Sectors - - -### SectorsRefs - - -Perms: read - -Inputs: `null` - -Response: -```json -{ - "98000": [ - { - "SectorID": 100, - "Offset": 10485760, - "Size": 1048576 - } - ] -} -``` - From afafaf9aa5ec76b92bed35c8c172aff7da1f2166 Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Fri, 20 Oct 2023 17:00:49 +0400 Subject: [PATCH 03/34] fix circular depndencies --- cmd/boostd/dagstore.go | 357 -------------------- cmd/boostd/main.go | 3 - gql/module.go | 68 ++++ gql/resolver_dealpublish.go | 2 +- node/builder.go | 6 +- node/modules/graphsync.go | 23 +- node/modules/piecedirectory.go | 20 -- node/modules/storageminer.go | 29 +- retrievalmarket/server/channelstate.go | 24 +- retrievalmarket/server/gsunpaidretrieval.go | 20 +- retrievalmarket/server/queryask.go | 5 +- 11 files changed, 117 insertions(+), 440 deletions(-) delete mode 100644 cmd/boostd/dagstore.go create mode 100644 gql/module.go diff --git a/cmd/boostd/dagstore.go b/cmd/boostd/dagstore.go deleted file mode 100644 index 2a44308f0..000000000 --- a/cmd/boostd/dagstore.go +++ /dev/null @@ -1,357 +0,0 @@ -package main - -import ( - "fmt" - "os" - "strings" - - "github.com/filecoin-project/lotus/lib/tablewriter" - "github.com/ipfs/go-cid" - - "github.com/fatih/color" - bapi "github.com/filecoin-project/boost/api" - bcli "github.com/filecoin-project/boost/cli" - lcli "github.com/filecoin-project/lotus/cli" - "github.com/urfave/cli/v2" -) - -var dagstoreCmd = &cli.Command{ - Name: "dagstore", - Usage: "Manage the dagstore on the Boost subsystem", - Subcommands: []*cli.Command{ - dagstoreRegisterShardCmd, - dagstoreInitializeShardCmd, - dagstoreRecoverShardCmd, - dagstoreInitializeAllCmd, - dagstoreListShardsCmd, - dagstoreGcCmd, - dagstoreDestroyShardCmd, - dagstoreLookupCmd, - }, -} - -var dagstoreGcCmd = &cli.Command{ - Name: "gc", - Usage: "Garbage collect the dagstore", - Flags: []cli.Flag{}, - Action: func(cctx *cli.Context) error { - ctx := lcli.ReqContext(cctx) - napi, closer, err := bcli.GetBoostAPI(cctx) - if err != nil { - return err - } - defer closer() - - collected, err := napi.BoostDagstoreGC(ctx) - if err != nil { - return err - } - - if len(collected) == 0 { - _, _ = fmt.Fprintln(os.Stdout, "no shards collected") - return nil - } - - for _, e := range collected { - if e.Error == "" { - _, _ = fmt.Fprintln(os.Stdout, e.Key, color.New(color.FgGreen).Sprint("SUCCESS")) - } else { - _, _ = fmt.Fprintln(os.Stdout, e.Key, color.New(color.FgRed).Sprint("ERROR"), e.Error) - } - } - - return nil - }, -} - -var dagstoreListShardsCmd = &cli.Command{ - Name: "list-shards", - Usage: "List all shards known to the dagstore, with their current status", - Flags: []cli.Flag{}, - Action: func(cctx *cli.Context) error { - ctx := lcli.ReqContext(cctx) - napi, closer, err := bcli.GetBoostAPI(cctx) - if err != nil { - return err - } - defer closer() - - shards, err := napi.BoostDagstoreListShards(ctx) - if err != nil { - return err - } - - return printTableShards(shards) - }, -} - -func printTableShards(shards []bapi.DagstoreShardInfo) error { - if len(shards) == 0 { - return nil - } - - tw := tablewriter.New( - tablewriter.Col("Key"), - tablewriter.Col("State"), - tablewriter.Col("Error"), - ) - - colors := map[string]color.Attribute{ - "ShardStateAvailable": color.FgGreen, - "ShardStateServing": color.FgBlue, - "ShardStateErrored": color.FgRed, - "ShardStateNew": color.FgYellow, - } - - for _, s := range shards { - m := map[string]interface{}{ - "Key": s.Key, - 
"State": func() string { - trimmedState := strings.TrimPrefix(s.State, "ShardState") - if c, ok := colors[s.State]; ok { - return color.New(c).Sprint(trimmedState) - } - return trimmedState - }(), - "Error": s.Error, - } - tw.Write(m) - } - return tw.Flush(os.Stdout) -} - -var dagstoreRegisterShardCmd = &cli.Command{ - Name: "register-shard", - ArgsUsage: "[key]", - Usage: "Register a shard", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - DefaultText: "depends on output being a TTY", - }, - }, - Action: func(cctx *cli.Context) error { - if cctx.IsSet("color") { - color.NoColor = !cctx.Bool("color") - } - - if cctx.NArg() != 1 { - return fmt.Errorf("must provide a single shard key") - } - - napi, closer, err := bcli.GetBoostAPI(cctx) - if err != nil { - return err - } - defer closer() - - ctx := lcli.ReqContext(cctx) - - shardKey := cctx.Args().First() - err = napi.BoostDagstoreRegisterShard(ctx, shardKey) - if err != nil { - return err - } - - fmt.Println("Registered shard " + shardKey) - return nil - }, -} - -var dagstoreInitializeShardCmd = &cli.Command{ - Name: "initialize-shard", - ArgsUsage: "[key]", - Usage: "Initialize the specified shard", - Flags: []cli.Flag{}, - Action: func(cctx *cli.Context) error { - if cctx.NArg() != 1 { - return fmt.Errorf("must provide a single shard key") - } - - ctx := lcli.ReqContext(cctx) - napi, closer, err := bcli.GetBoostAPI(cctx) - if err != nil { - return err - } - defer closer() - - return napi.BoostDagstoreInitializeShard(ctx, cctx.Args().First()) - }, -} - -var dagstoreInitializeAllCmd = &cli.Command{ - Name: "initialize-all", - Usage: "Initialize all uninitialized shards, streaming results as they're produced; only shards for unsealed pieces are initialized by default", - Flags: []cli.Flag{ - &cli.UintFlag{ - Name: "concurrency", - Usage: "maximum shards to initialize concurrently at a time; use 0 for unlimited", - Required: true, - }, - &cli.BoolFlag{ - Name: "include-sealed", - Usage: "initialize sealed pieces as well", - }, - }, - Action: func(cctx *cli.Context) error { - concurrency := cctx.Uint("concurrency") - sealed := cctx.Bool("include-sealed") - - ctx := lcli.ReqContext(cctx) - - napi, closer, err := bcli.GetBoostAPI(cctx) - if err != nil { - return err - } - defer closer() - - params := bapi.DagstoreInitializeAllParams{ - MaxConcurrency: int(concurrency), - IncludeSealed: sealed, - } - - ch, err := napi.BoostDagstoreInitializeAll(ctx, params) - if err != nil { - return err - } - - for { - select { - case evt, ok := <-ch: - if !ok { - return nil - } - _, _ = fmt.Fprint(os.Stdout, color.New(color.BgHiBlack).Sprintf("(%d/%d)", evt.Current, evt.Total)) - _, _ = fmt.Fprint(os.Stdout, " ") - if evt.Event == "start" { - _, _ = fmt.Fprintln(os.Stdout, evt.Key, color.New(color.Reset).Sprint("STARTING")) - } else { - if evt.Success { - _, _ = fmt.Fprintln(os.Stdout, evt.Key, color.New(color.FgGreen).Sprint("SUCCESS")) - } else { - _, _ = fmt.Fprintln(os.Stdout, evt.Key, color.New(color.FgRed).Sprint("ERROR"), evt.Error) - } - } - - case <-ctx.Done(): - return fmt.Errorf("aborted") - } - } - }, -} - -var dagstoreRecoverShardCmd = &cli.Command{ - Name: "recover-shard", - ArgsUsage: "[key]", - Usage: "Attempt to recover a shard in errored state", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - DefaultText: "depends on output being a TTY", - }, - }, - Action: func(cctx *cli.Context) error { - if cctx.IsSet("color") { - color.NoColor = 
!cctx.Bool("color") - } - - if cctx.NArg() != 1 { - return fmt.Errorf("must provide a single shard key") - } - - napi, closer, err := bcli.GetBoostAPI(cctx) - if err != nil { - return err - } - defer closer() - - ctx := lcli.ReqContext(cctx) - - return napi.BoostDagstoreRecoverShard(ctx, cctx.Args().First()) - }, -} - -var dagstoreDestroyShardCmd = &cli.Command{ - Name: "destroy-shard", - ArgsUsage: "[key]", - Usage: "Destroy a shard", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - DefaultText: "depends on output being a TTY", - }, - }, - Action: func(cctx *cli.Context) error { - if cctx.IsSet("color") { - color.NoColor = !cctx.Bool("color") - } - - if cctx.NArg() != 1 { - return fmt.Errorf("must provide a single shard key") - } - - napi, closer, err := bcli.GetBoostAPI(cctx) - if err != nil { - return err - } - defer closer() - - ctx := lcli.ReqContext(cctx) - - shardKey := cctx.Args().First() - err = napi.BoostDagstoreDestroyShard(ctx, shardKey) - if err != nil { - return err - } - - fmt.Println("Destroyed shard " + shardKey) - return nil - }, -} - -var dagstoreLookupCmd = &cli.Command{ - Name: "lookup-piece-cid", - ArgsUsage: "[key]", - Usage: "Performs a reverse lookup with payload CID to get Piece CID", - Aliases: []string{"lpc"}, - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - DefaultText: "depends on output being a TTY", - }, - }, - Action: func(cctx *cli.Context) error { - if cctx.IsSet("color") { - color.NoColor = !cctx.Bool("color") - } - - if cctx.NArg() != 1 { - return fmt.Errorf("must provide a single payload CID") - } - - napi, closer, err := bcli.GetBoostAPI(cctx) - if err != nil { - return err - } - defer closer() - - ctx := lcli.ReqContext(cctx) - - shardKey := cctx.Args().First() - payloadCid, err := cid.Parse(shardKey) - if err != nil { - return fmt.Errorf("Unable to parse the provided CID: %s", shardKey) - } - pieceCid, err := napi.BoostDagstorePiecesContainingMultihash(ctx, payloadCid.Hash()) - if err != nil { - return err - } - - fmt.Printf("Given CID was found in the following pieces: %s", pieceCid) - return nil - }, -} diff --git a/cmd/boostd/main.go b/cmd/boostd/main.go index bcd9b209c..7fcb2b391 100644 --- a/cmd/boostd/main.go +++ b/cmd/boostd/main.go @@ -42,12 +42,9 @@ func main() { restoreCmd, configCmd, dummydealCmd, - dataTransfersCmd, - retrievalDealsCmd, indexProvCmd, importDataCmd, logCmd, - dagstoreCmd, netCmd, pieceDirCmd, recoverCmd, diff --git a/gql/module.go b/gql/module.go new file mode 100644 index 000000000..139e3f81d --- /dev/null +++ b/gql/module.go @@ -0,0 +1,68 @@ +package gql + +import ( + "context" + + "github.com/filecoin-project/boost/cmd/lib" + "github.com/filecoin-project/boost/db" + "github.com/filecoin-project/boost/fundmanager" + "github.com/filecoin-project/boost/indexprovider" + "github.com/filecoin-project/boost/lib/legacy" + "github.com/filecoin-project/boost/lib/mpoolmonitor" + "github.com/filecoin-project/boost/markets/storageadapter" + "github.com/filecoin-project/boost/node/config" + "github.com/filecoin-project/boost/piecedirectory" + "github.com/filecoin-project/boost/retrievalmarket/rtvllog" + "github.com/filecoin-project/boost/sectorstatemgr" + "github.com/filecoin-project/boost/storagemanager" + "github.com/filecoin-project/boost/storagemarket" + "github.com/filecoin-project/boost/storagemarket/sealingpipeline" + "github.com/filecoin-project/boost/storagemarket/storedask" + "github.com/filecoin-project/lotus/api/v1api" + 
"github.com/filecoin-project/lotus/node/repo" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + provider "github.com/ipni/index-provider" + "github.com/libp2p/go-libp2p/core/host" + "go.uber.org/fx" +) + +func NewGraphqlServer(cfg *config.Boost) func(lc fx.Lifecycle, r repo.LockedRepo, h host.Host, prov *storagemarket.Provider, dealsDB *db.DealsDB, logsDB *db.LogsDB, retDB *rtvllog.RetrievalLogDB, plDB *db.ProposalLogsDB, fundsDB *db.FundsDB, fundMgr *fundmanager.FundManager, storageMgr *storagemanager.StorageManager, publisher *storageadapter.DealPublisher, spApi sealingpipeline.API, legacyDeals legacy.LegacyDealManager, piecedirectory *piecedirectory.PieceDirectory, indexProv provider.Interface, idxProvWrapper *indexprovider.Wrapper, fullNode v1api.FullNode, bg BlockGetter, ssm *sectorstatemgr.SectorStateMgr, mpool *mpoolmonitor.MpoolMonitor, mma *lib.MultiMinerAccessor, sask *storedask.StoredAsk) *Server { + return func(lc fx.Lifecycle, r repo.LockedRepo, h host.Host, prov *storagemarket.Provider, dealsDB *db.DealsDB, logsDB *db.LogsDB, retDB *rtvllog.RetrievalLogDB, plDB *db.ProposalLogsDB, fundsDB *db.FundsDB, fundMgr *fundmanager.FundManager, + storageMgr *storagemanager.StorageManager, publisher *storageadapter.DealPublisher, spApi sealingpipeline.API, + legacyDeals legacy.LegacyDealManager, piecedirectory *piecedirectory.PieceDirectory, + indexProv provider.Interface, idxProvWrapper *indexprovider.Wrapper, fullNode v1api.FullNode, bg BlockGetter, + ssm *sectorstatemgr.SectorStateMgr, mpool *mpoolmonitor.MpoolMonitor, mma *lib.MultiMinerAccessor, sask *storedask.StoredAsk) *Server { + + resolverCtx, cancel := context.WithCancel(context.Background()) + resolver := NewResolver(resolverCtx, cfg, r, h, dealsDB, logsDB, retDB, plDB, fundsDB, fundMgr, storageMgr, spApi, prov, legacyDeals, piecedirectory, publisher, indexProv, idxProvWrapper, fullNode, ssm, mpool, mma, sask) + svr := NewServer(cfg, resolver, bg) + + lc.Append(fx.Hook{ + OnStart: svr.Start, + OnStop: func(ctx context.Context) error { + cancel() + return svr.Stop(ctx) + }, + }) + + return svr + } +} + +func NewBlockGetter(pd *piecedirectory.PieceDirectory) BlockGetter { + return &pdBlockGetter{pd: pd} +} + +type pdBlockGetter struct { + pd *piecedirectory.PieceDirectory +} + +func (p *pdBlockGetter) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) { + bz, err := p.pd.BlockstoreGet(ctx, c) + if err != nil { + return nil, err + } + + return blocks.NewBlockWithCid(bz, c) +} diff --git a/gql/resolver_dealpublish.go b/gql/resolver_dealpublish.go index e47fc4d33..78cffc48b 100644 --- a/gql/resolver_dealpublish.go +++ b/gql/resolver_dealpublish.go @@ -104,7 +104,7 @@ func (r *resolver) DealPublish(ctx context.Context) (*dealPublishResolver, error // If there are any legacy deals to look up if len(legacyDealIDs) > 0 { // Get all deals from the legacy provider - legacyDeals, err := r.legacyProv.ListLocalDeals() + legacyDeals, err := r.legacyDeals.ListDeals() if err != nil { return nil, fmt.Errorf("getting legacy deals: %w", err) } diff --git a/node/builder.go b/node/builder.go index 836886253..4038421df 100644 --- a/node/builder.go +++ b/node/builder.go @@ -523,8 +523,8 @@ func ConfigBoost(cfg *config.Boost) Option { Override(new(*mpoolmonitor.MpoolMonitor), modules.NewMpoolMonitor(cfg)), // GraphQL server - Override(new(gql.BlockGetter), modules.NewBlockGetter), - Override(new(*gql.Server), modules.NewGraphqlServer(cfg)), + Override(new(gql.BlockGetter), gql.NewBlockGetter), + 
Override(new(*gql.Server), gql.NewGraphqlServer(cfg)),
 
 		// Tracing
 		Override(new(*tracing.Tracing), modules.NewTracing(cfg)),
@@ -561,7 +561,7 @@ func ConfigBoost(cfg *config.Boost) Option {
 
 		// Lotus Markets (retrieval)
 		Override(new(server.SectorAccessor), modules.NewSectorAccessor(cfg)),
-		Override(HandleSetRetrievalAskGetter, modules.NewRetrievalAskGetter),
+		Override(HandleSetRetrievalAskGetter, server.NewRetrievalAskGetter),
 		Override(HandleRetrievalEventsKey, modules.HandleRetrievalGraphsyncUpdates(time.Duration(cfg.Dealmaking.RetrievalLogDuration), time.Duration(cfg.Dealmaking.StalledRetrievalTimeout))),
 		Override(HandleRetrievalAskKey, modules.HandleQueryAsk),
 		Override(new(*lp2pimpl.TransportsListener), modules.NewTransportsListener(cfg)),
diff --git a/node/modules/graphsync.go b/node/modules/graphsync.go
index 529e3f563..4fa8a274f 100644
--- a/node/modules/graphsync.go
+++ b/node/modules/graphsync.go
@@ -15,8 +15,6 @@ import (
 	"github.com/filecoin-project/boost/piecedirectory"
 	"github.com/filecoin-project/boost/retrievalmarket/server"
 	retrievalimpl "github.com/filecoin-project/boost/retrievalmarket/server"
-	"github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes"
-	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/lotus/metrics"
 	lotus_helpers "github.com/filecoin-project/lotus/node/modules/helpers"
 	"github.com/ipfs/kubo/core/node/helpers"
@@ -26,23 +24,6 @@ import (
 	"go.uber.org/fx"
 )
 
-type RetrievalAskGetter struct {
-	ask legacyretrievaltypes.Ask
-}
-
-func (rag *RetrievalAskGetter) GetAsk() *legacyretrievaltypes.Ask {
-	return &rag.ask
-}
-
-func NewRetrievalAskGetter() *RetrievalAskGetter {
-	return &RetrievalAskGetter{
-		ask: legacyretrievaltypes.Ask{
-			PricePerByte: abi.NewTokenAmount(0),
-			UnsealPrice:  abi.NewTokenAmount(0),
-		},
-	}
-}
-
 // LinkSystemProv is used to avoid circular dependencies
 type LinkSystemProv struct {
 	*ipld.LinkSystem
@@ -57,8 +38,8 @@ func (p *LinkSystemProv) LinkSys() *ipld.LinkSystem {
 }
 
 // RetrievalGraphsync creates a graphsync instance used to serve retrievals.
-func RetrievalGraphsync(parallelTransfersForStorage uint64, parallelTransfersForStoragePerPeer uint64, parallelTransfersForRetrieval uint64) func(mctx lotus_helpers.MetricsCtx, lc fx.Lifecycle, pid *piecedirectory.PieceDirectory, h host.Host, net dtypes.ProviderTransferNetwork, dealDecider dtypes.RetrievalDealFilter, sa *lib.MultiMinerAccessor, askGetter *RetrievalAskGetter) (*server.GraphsyncUnpaidRetrieval, error) { - return func(mctx lotus_helpers.MetricsCtx, lc fx.Lifecycle, pid *piecedirectory.PieceDirectory, h host.Host, net dtypes.ProviderTransferNetwork, dealDecider dtypes.RetrievalDealFilter, sa *lib.MultiMinerAccessor, askGetter *RetrievalAskGetter) (*server.GraphsyncUnpaidRetrieval, error) { +func RetrievalGraphsync(parallelTransfersForStorage uint64, parallelTransfersForStoragePerPeer uint64, parallelTransfersForRetrieval uint64) func(mctx lotus_helpers.MetricsCtx, lc fx.Lifecycle, pid *piecedirectory.PieceDirectory, h host.Host, net dtypes.ProviderTransferNetwork, dealDecider dtypes.RetrievalDealFilter, sa *lib.MultiMinerAccessor, askGetter *server.RetrievalAskGetter) (*server.GraphsyncUnpaidRetrieval, error) { + return func(mctx lotus_helpers.MetricsCtx, lc fx.Lifecycle, pid *piecedirectory.PieceDirectory, h host.Host, net dtypes.ProviderTransferNetwork, dealDecider dtypes.RetrievalDealFilter, sa *lib.MultiMinerAccessor, askGetter *server.RetrievalAskGetter) (*server.GraphsyncUnpaidRetrieval, error) { // Graphsync tracks metrics separately, pass nil blockMetrics to the remote blockstore rb := remoteblockstore.NewRemoteBlockstore(pid, nil) diff --git a/node/modules/piecedirectory.go b/node/modules/piecedirectory.go index 6274293f8..2e1d77d2f 100644 --- a/node/modules/piecedirectory.go +++ b/node/modules/piecedirectory.go @@ -6,7 +6,6 @@ import ( "time" "github.com/filecoin-project/boost/cmd/lib" - "github.com/filecoin-project/boost/gql" "github.com/filecoin-project/boost/node/config" "github.com/filecoin-project/boost/piecedirectory" "github.com/filecoin-project/boost/sectorstatemgr" @@ -20,8 +19,6 @@ import ( "github.com/filecoin-project/lotus/node/modules/dtypes" lotus_dtypes "github.com/filecoin-project/lotus/node/modules/dtypes" lotus_repo "github.com/filecoin-project/lotus/node/repo" - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" "go.uber.org/fx" ) @@ -163,20 +160,3 @@ func NewPieceDoctor(lc fx.Lifecycle, maddr lotus_dtypes.MinerAddress, store *bdc }) return doc } - -func NewBlockGetter(pd *piecedirectory.PieceDirectory) gql.BlockGetter { - return &pdBlockGetter{pd: pd} -} - -type pdBlockGetter struct { - pd *piecedirectory.PieceDirectory -} - -func (p *pdBlockGetter) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) { - bz, err := p.pd.BlockstoreGet(ctx, c) - if err != nil { - return nil, err - } - - return blocks.NewBlockWithCid(bz, c) -} diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index 79270feae..d5443a77d 100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -14,7 +14,6 @@ import ( dtgstransport "github.com/filecoin-project/boost/datatransfer/transport/graphsync" "github.com/filecoin-project/boost/db" "github.com/filecoin-project/boost/fundmanager" - "github.com/filecoin-project/boost/gql" "github.com/filecoin-project/boost/indexprovider" "github.com/filecoin-project/boost/lib/legacy" "github.com/filecoin-project/boost/lib/mpoolmonitor" @@ -24,9 +23,7 @@ import ( "github.com/filecoin-project/boost/node/impl/backupmgr" "github.com/filecoin-project/boost/node/modules/dtypes" 
"github.com/filecoin-project/boost/piecedirectory" - "github.com/filecoin-project/boost/retrievalmarket/rtvllog" "github.com/filecoin-project/boost/retrievalmarket/server" - "github.com/filecoin-project/boost/sectorstatemgr" "github.com/filecoin-project/boost/storagemanager" "github.com/filecoin-project/boost/storagemarket" "github.com/filecoin-project/boost/storagemarket/logs" @@ -59,7 +56,6 @@ import ( "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/namespace" - provider "github.com/ipni/index-provider" "github.com/libp2p/go-libp2p/core/host" "go.uber.org/fx" "go.uber.org/multierr" @@ -305,7 +301,7 @@ func NewAskDB(sqldb *sql.DB) *db.StorageAskDB { return db.NewStorageAskDB(sqldb) } -func HandleQueryAsk(lc fx.Lifecycle, h host.Host, maddr lotus_dtypes.MinerAddress, pd *piecedirectory.PieceDirectory, sa *lib.MultiMinerAccessor, askStore RetrievalAskGetter, full v1api.FullNode) { +func HandleQueryAsk(lc fx.Lifecycle, h host.Host, maddr lotus_dtypes.MinerAddress, pd *piecedirectory.PieceDirectory, sa *lib.MultiMinerAccessor, askStore server.RetrievalAskGetter, full v1api.FullNode) { handler := server.NewQueryAskHandler(h, address.Address(maddr), pd, sa, askStore, full) lc.Append(fx.Hook{ OnStart: func(ctx context.Context) error { @@ -501,29 +497,6 @@ func NewStorageMarketProvider(provAddr address.Address, cfg *config.Boost) func( } } -func NewGraphqlServer(cfg *config.Boost) func(lc fx.Lifecycle, r repo.LockedRepo, h host.Host, prov *storagemarket.Provider, dealsDB *db.DealsDB, logsDB *db.LogsDB, retDB *rtvllog.RetrievalLogDB, plDB *db.ProposalLogsDB, fundsDB *db.FundsDB, fundMgr *fundmanager.FundManager, storageMgr *storagemanager.StorageManager, publisher *storageadapter.DealPublisher, spApi sealingpipeline.API, legacyDeals legacy.LegacyDealManager, piecedirectory *piecedirectory.PieceDirectory, indexProv provider.Interface, idxProvWrapper *indexprovider.Wrapper, fullNode v1api.FullNode, bg gql.BlockGetter, ssm *sectorstatemgr.SectorStateMgr, mpool *mpoolmonitor.MpoolMonitor, mma *lib.MultiMinerAccessor, sask *storedask.StoredAsk) *gql.Server { - return func(lc fx.Lifecycle, r repo.LockedRepo, h host.Host, prov *storagemarket.Provider, dealsDB *db.DealsDB, logsDB *db.LogsDB, retDB *rtvllog.RetrievalLogDB, plDB *db.ProposalLogsDB, fundsDB *db.FundsDB, fundMgr *fundmanager.FundManager, - storageMgr *storagemanager.StorageManager, publisher *storageadapter.DealPublisher, spApi sealingpipeline.API, - legacyDeals legacy.LegacyDealManager, piecedirectory *piecedirectory.PieceDirectory, - indexProv provider.Interface, idxProvWrapper *indexprovider.Wrapper, fullNode v1api.FullNode, bg gql.BlockGetter, - ssm *sectorstatemgr.SectorStateMgr, mpool *mpoolmonitor.MpoolMonitor, mma *lib.MultiMinerAccessor, sask *storedask.StoredAsk) *gql.Server { - - resolverCtx, cancel := context.WithCancel(context.Background()) - resolver := gql.NewResolver(resolverCtx, cfg, r, h, dealsDB, logsDB, retDB, plDB, fundsDB, fundMgr, storageMgr, spApi, prov, legacyDeals, piecedirectory, publisher, indexProv, idxProvWrapper, fullNode, ssm, mpool, mma, sask) - svr := gql.NewServer(cfg, resolver, bg) - - lc.Append(fx.Hook{ - OnStart: svr.Start, - OnStop: func(ctx context.Context) error { - cancel() - return svr.Stop(ctx) - }, - }) - - return svr - } -} - // Use a caching sector accessor func NewSectorAccessor(cfg *config.Boost) sectoraccessor.SectorAccessorConstructor { // The cache just holds booleans, so there's no harm in using a big number diff --git 
a/retrievalmarket/server/channelstate.go b/retrievalmarket/server/channelstate.go index 9f596eae2..159fe1ba9 100644 --- a/retrievalmarket/server/channelstate.go +++ b/retrievalmarket/server/channelstate.go @@ -68,6 +68,26 @@ type channelState struct { message string } +func (c channelState) Vouchers() []datatransfer.Voucher { + //TODO implement me + panic("implement me") +} + +func (c channelState) VoucherResults() []datatransfer.VoucherResult { + //TODO implement me + panic("implement me") +} + +func (c channelState) LastVoucher() datatransfer.Voucher { + //TODO implement me + panic("implement me") +} + +func (c channelState) LastVoucherResult() datatransfer.VoucherResult { + //TODO implement me + panic("implement me") +} + // EmptyChannelState is the zero value for channel state, meaning not present var EmptyChannelState = channelState{} @@ -102,8 +122,8 @@ func (c channelState) Selector() ipld.Node { } // Voucher returns the voucher for this data transfer -func (c channelState) Voucher() datatransfer.TypedVoucher { - return datatransfer.TypedVoucher{} +func (c channelState) Voucher() datatransfer.Voucher { + return nil } // ReceivedCidsTotal returns the number of (non-unique) cids received so far diff --git a/retrievalmarket/server/gsunpaidretrieval.go b/retrievalmarket/server/gsunpaidretrieval.go index f20625ceb..30895e842 100644 --- a/retrievalmarket/server/gsunpaidretrieval.go +++ b/retrievalmarket/server/gsunpaidretrieval.go @@ -14,7 +14,6 @@ import ( "github.com/filecoin-project/boost/datatransfer/registry" "github.com/filecoin-project/boost/datatransfer/transport/graphsync/extension" "github.com/filecoin-project/boost/metrics" - "github.com/filecoin-project/boost/node/modules" "github.com/filecoin-project/boost/piecedirectory" "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" "github.com/filecoin-project/go-state-types/abi" @@ -73,11 +72,28 @@ var defaultExtensions = []graphsync.ExtensionName{ extension.ExtensionDataTransfer1_1, } +type RetrievalAskGetter struct { + ask legacyretrievaltypes.Ask +} + +func (rag *RetrievalAskGetter) GetAsk() *legacyretrievaltypes.Ask { + return &rag.ask +} + +func NewRetrievalAskGetter() *RetrievalAskGetter { + return &RetrievalAskGetter{ + ask: legacyretrievaltypes.Ask{ + PricePerByte: abi.NewTokenAmount(0), + UnsealPrice: abi.NewTokenAmount(0), + }, + } +} + type ValidationDeps struct { DealDecider DealDecider PieceDirectory *piecedirectory.PieceDirectory SectorAccessor SectorAccessor - AskStore *modules.RetrievalAskGetter + AskStore *RetrievalAskGetter } func NewGraphsyncUnpaidRetrieval(peerID peer.ID, gs graphsync.GraphExchange, dtnet network.DataTransferNetwork, vdeps ValidationDeps) (*GraphsyncUnpaidRetrieval, error) { diff --git a/retrievalmarket/server/queryask.go b/retrievalmarket/server/queryask.go index db7d70037..72705def7 100644 --- a/retrievalmarket/server/queryask.go +++ b/retrievalmarket/server/queryask.go @@ -6,7 +6,6 @@ import ( "fmt" "time" - "github.com/filecoin-project/boost/node/modules" "github.com/filecoin-project/boost/piecedirectory" "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" "github.com/filecoin-project/go-address" @@ -31,12 +30,12 @@ type QueryAskHandler struct { minerAddress address.Address pd *piecedirectory.PieceDirectory sa SectorAccessor - askStore modules.RetrievalAskGetter + askStore RetrievalAskGetter full v1api.FullNode host host.Host } -func NewQueryAskHandler(host host.Host, maddr address.Address, pd *piecedirectory.PieceDirectory, sa 
SectorAccessor, askStore modules.RetrievalAskGetter, full v1api.FullNode) *QueryAskHandler {
+func NewQueryAskHandler(host host.Host, maddr address.Address, pd *piecedirectory.PieceDirectory, sa SectorAccessor, askStore RetrievalAskGetter, full v1api.FullNode) *QueryAskHandler {
 	return &QueryAskHandler{
 		host:         host,
 		minerAddress: maddr,

From d7be7e0ae3aac5c6cf9d3eb5d037bff3b6e34258 Mon Sep 17 00:00:00 2001
From: LexLuthr
Date: Fri, 20 Oct 2023 17:03:51 +0400
Subject: [PATCH 04/34] go mod tidy

---
 go.mod | 13 ++++---------
 go.sum |  3 ---
 2 files changed, 4 insertions(+), 12 deletions(-)

diff --git a/go.mod b/go.mod
index 0bdb3124e..9893dba54 100644
--- a/go.mod
+++ b/go.mod
@@ -46,24 +46,20 @@ require (
 	github.com/graph-gophers/graphql-transport-ws v0.0.2
 	github.com/hashicorp/go-multierror v1.1.1
 	github.com/ipfs/go-block-format v0.2.0
-	github.com/ipfs/go-blockservice v0.5.1
+	github.com/ipfs/go-blockservice v0.5.1 // indirect
 	github.com/ipfs/go-cid v0.4.1
 	github.com/ipfs/go-cidutil v0.1.0
 	github.com/ipfs/go-datastore v0.6.0
 	github.com/ipfs/go-graphsync v0.14.10
 	github.com/ipfs/go-ipfs-blocksutil v0.0.1
-	github.com/ipfs/go-ipfs-chunker v0.0.5
+	github.com/ipfs/go-ipfs-chunker v0.0.5 // indirect
 	github.com/ipfs/go-ipfs-ds-help v1.1.0 // indirect
 	github.com/ipfs/go-ipfs-exchange-interface v0.2.0 // indirect
-	github.com/ipfs/go-ipfs-exchange-offline v0.3.0
-	github.com/ipfs/go-ipfs-files v0.3.0 // indirect
 	github.com/ipfs/go-ipld-format v0.6.0
 	github.com/ipfs/go-ipld-legacy v0.2.1
-	github.com/ipfs/go-libipfs v0.7.0 // indirect
 	github.com/ipfs/go-log/v2 v2.5.1
-	github.com/ipfs/go-merkledag v0.11.0
+	github.com/ipfs/go-merkledag v0.11.0 // indirect
 	github.com/ipfs/go-metrics-interface v0.0.1
-	github.com/ipfs/go-unixfs v0.4.5
 	github.com/ipld/go-car v0.6.1
 	github.com/ipld/go-car/v2 v2.13.1
 	github.com/ipld/go-ipld-prime v0.21.0
@@ -215,7 +211,6 @@ require (
 	github.com/ipfs/go-fs-lock v0.0.7 // indirect
 	github.com/ipfs/go-ipfs-cmds v0.10.0 // indirect
 	github.com/ipfs/go-ipfs-delay v0.0.1 // indirect
-	github.com/ipfs/go-ipfs-posinfo v0.0.1 // indirect
 	github.com/ipfs/go-ipfs-pq v0.0.3 // indirect
 	github.com/ipfs/go-ipfs-util v0.0.3 // indirect
 	github.com/ipfs/go-ipld-cbor v0.1.0
@@ -336,7 +331,6 @@ require (
 	github.com/filecoin-project/go-fil-markets v1.28.3
 	github.com/filecoin-project/lotus v1.23.4-rc1
 	github.com/ipfs/boxo v0.12.0
-	github.com/ipfs/go-ipfs-blockstore v1.3.0
 	github.com/ipfs/kubo v0.22.0
 	github.com/ipni/go-libipni v0.5.1
 	github.com/ipni/ipni-cli v0.1.1
@@ -360,6 +354,7 @@ require (
 	github.com/hashicorp/go-uuid v1.0.2 // indirect
 	github.com/hashicorp/golang-lru/arc/v2 v2.0.5 // indirect
 	github.com/hashicorp/golang-lru/v2 v2.0.5 // indirect
+	github.com/ipfs/go-ipfs-blockstore v1.3.0 // indirect
 	github.com/jackc/chunkreader/v2 v2.0.1 // indirect
 	github.com/jackc/pgconn v1.11.0 // indirect
 	github.com/jackc/pgio v1.0.0 // indirect
diff --git a/go.sum b/go.sum
index 7243491dd..2b9f602af 100644
--- a/go.sum
+++ b/go.sum
@@ -816,7 +816,6 @@ github.com/ipfs/go-ipfs-exchange-offline v0.3.0/go.mod h1:MOdJ9DChbb5u37M1IcbrRB
 github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4=
 github.com/ipfs/go-ipfs-files v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4=
 github.com/ipfs/go-ipfs-files v0.3.0 h1:fallckyc5PYjuMEitPNrjRfpwl7YFt69heCOUhsbGxQ=
-github.com/ipfs/go-ipfs-files v0.3.0/go.mod h1:xAUtYMwB+iu/dtf6+muHNSFQCJG2dSiStR2P6sn9tIM=
 github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs=
github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A= github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= @@ -852,7 +851,6 @@ github.com/ipfs/go-ipld-legacy v0.1.0/go.mod h1:86f5P/srAmh9GcIcWQR9lfFLZPrIyyXQ github.com/ipfs/go-ipld-legacy v0.2.1 h1:mDFtrBpmU7b//LzLSypVrXsD8QxkEWxu5qVxN99/+tk= github.com/ipfs/go-ipld-legacy v0.2.1/go.mod h1:782MOUghNzMO2DER0FlBR94mllfdCJCkTtDtPM51otM= github.com/ipfs/go-libipfs v0.7.0 h1:Mi54WJTODaOL2/ZSm5loi3SwI3jI2OuFWUrQIkJ5cpM= -github.com/ipfs/go-libipfs v0.7.0/go.mod h1:KsIf/03CqhICzyRGyGo68tooiBE2iFbI/rXW7FhAYr0= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= github.com/ipfs/go-log v1.0.0/go.mod h1:JO7RzlMK6rA+CIxFMLOuB6Wf5b81GDiKElL7UPSIKjA= github.com/ipfs/go-log v1.0.1/go.mod h1:HuWlQttfN6FWNHRhlY5yMk/lW7evQC0HHGOxEwMRR8I= @@ -891,7 +889,6 @@ github.com/ipfs/go-peertaskqueue v0.8.1/go.mod h1:Oxxd3eaK279FxeydSPPVGHzbwVeHja github.com/ipfs/go-unixfs v0.2.2-0.20190827150610-868af2e9e5cb/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k= github.com/ipfs/go-unixfs v0.3.1/go.mod h1:h4qfQYzghiIc8ZNFKiLMFWOTzrWIAtzYQ59W/pCFf1o= github.com/ipfs/go-unixfs v0.4.5 h1:wj8JhxvV1G6CD7swACwSKYa+NgtdWC1RUit+gFnymDU= -github.com/ipfs/go-unixfs v0.4.5/go.mod h1:BIznJNvt/gEx/ooRMI4Us9K8+qeGO7vx1ohnbk8gjFg= github.com/ipfs/go-unixfsnode v1.4.0/go.mod h1:qc7YFFZ8tABc58p62HnIYbUMwj9chhUuFWmxSokfePo= github.com/ipfs/go-unixfsnode v1.8.0 h1:yCkakzuE365glu+YkgzZt6p38CSVEBPgngL9ZkfnyQU= github.com/ipfs/go-unixfsnode v1.8.0/go.mod h1:HxRu9HYHOjK6HUqFBAi++7DVoWAHn0o4v/nZ/VA+0g8= From 60312fe7e7625cac9c746aaf2fa20182308a8bc2 Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Fri, 20 Oct 2023 18:13:35 +0400 Subject: [PATCH 05/34] fix lint errors --- .circleci/config.yml | 9 +-- cmd/boostx/stats_cmd.go | 2 +- cmd/lib/stores/error.go | 8 +- cmd/lib/stores/kvcarbs.go | 9 +-- datatransfer/channelmonitor/channelmonitor.go | 19 ----- datatransfer/impl/events.go | 8 +- datatransfer/transport/graphsync/graphsync.go | 44 ++++------- db/storageask.go | 3 +- itests/disabled_markets_v1_deal_test.go | 53 ------------- itests/framework/framework.go | 78 ++++++++++++------- ...test.go => graphsync_identity_cid_test.go} | 47 ++++++----- ...al_test.go => graphsync_retrieval_test.go} | 40 ++++++---- markets/piecestore/impl/piecestore.go | 16 ++-- markets/storageadapter/api.go | 49 ------------ markets/storageadapter/dealpublisher_test.go | 14 ++++ node/builder.go | 2 - node/modules/graphsync.go | 14 ---- retrievalmarket/server/gsunpaidretrieval.go | 6 -- .../server/gsunpaidretrieval_test.go | 33 ++++---- storagemarket/storedask/storedask.go | 2 + .../types/legacytypes/filestore/filestore.go | 3 +- storagemarket/types/types.go | 3 - 22 files changed, 176 insertions(+), 286 deletions(-) delete mode 100644 itests/disabled_markets_v1_deal_test.go rename itests/{markets_v1_identity_cid_test.go => graphsync_identity_cid_test.go} (77%) rename itests/{markets_v1_retrieval_test.go => graphsync_retrieval_test.go} (86%) delete mode 100644 markets/storageadapter/api.go diff --git a/.circleci/config.yml b/.circleci/config.yml index 11b5d4880..800c52572 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -321,17 +321,12 @@ workflows: - test: name: test-itest-markets_v1_identity_cid suite: itest-markets_v1_identity_cid - target: "./itests/markets_v1_identity_cid_test.go" + target: "./itests/graphsync_identity_cid_test.go" - test: name: test-itest-markets_v1_retrieval suite: 
itest-markets_v1_retrieval - target: "./itests/markets_v1_retrieval_test.go" - - - test: - name: test-itest-disabled_markets_v1_deal - suite: itest-disabled_markets_v1_deal - target: "./itests/disabled_markets_v1_deal_test.go" + target: "./itests/graphsync_retrieval_test.go" - test: name: test-all diff --git a/cmd/boostx/stats_cmd.go b/cmd/boostx/stats_cmd.go index 9745cee3a..1b8775955 100644 --- a/cmd/boostx/stats_cmd.go +++ b/cmd/boostx/stats_cmd.go @@ -8,7 +8,7 @@ import ( "sync" "github.com/filecoin-project/boost/retrievalmarket/lp2pimpl" - transports_types "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" + transports_types "github.com/filecoin-project/boost/retrievalmarket/types" clinode "github.com/filecoin-project/boost/cli/node" "github.com/filecoin-project/boost/cmd" diff --git a/cmd/lib/stores/error.go b/cmd/lib/stores/error.go index cc9a4767e..c7aecc738 100644 --- a/cmd/lib/stores/error.go +++ b/cmd/lib/stores/error.go @@ -1,9 +1,11 @@ package stores -import "golang.org/x/xerrors" +import ( + "errors" +) -var ErrNotFound = xerrors.New("not found") +var ErrNotFound = errors.New("not found") func IsNotFound(err error) bool { - return xerrors.Is(err, ErrNotFound) + return errors.Is(err, ErrNotFound) } diff --git a/cmd/lib/stores/kvcarbs.go b/cmd/lib/stores/kvcarbs.go index 4f457d43a..a4cb240f2 100644 --- a/cmd/lib/stores/kvcarbs.go +++ b/cmd/lib/stores/kvcarbs.go @@ -7,11 +7,10 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "sync" - blockstore "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/blockstore" "github.com/ipfs/boxo/ipld/merkledag" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" @@ -331,10 +330,10 @@ func (drsb *discardingReadSeekerPlusByte) Seek(offset int64, whence int) (int64, if n < 0 { panic("unsupported rewind via whence: io.SeekStart") } - _, err := io.CopyN(ioutil.Discard, drsb, n) + _, err := io.CopyN(io.Discard, drsb, n) return drsb.offset, err case io.SeekCurrent: - _, err := io.CopyN(ioutil.Discard, drsb, offset) + _, err := io.CopyN(io.Discard, drsb, offset) return drsb.offset, err default: panic("unsupported whence: io.SeekEnd") @@ -1628,7 +1627,7 @@ func (b *ReadWrite) Finalize() error { // Note that we can't use b.Close here, as that tries to grab the same // mutex we're holding here. - defer b.ronly.closeWithoutMutex() + defer b.ronly.closeWithoutMutex() //nolint:errcheck // TODO if index not needed don't bother flattening it. fi, err := b.idx.flatten(b.opts.IndexCodec) diff --git a/datatransfer/channelmonitor/channelmonitor.go b/datatransfer/channelmonitor/channelmonitor.go index 205e66163..d87d4923f 100644 --- a/datatransfer/channelmonitor/channelmonitor.go +++ b/datatransfer/channelmonitor/channelmonitor.go @@ -124,17 +124,6 @@ func (m *Monitor) Shutdown() { m.stop() } -// onShutdown shuts down all monitored channels. It is called when the run -// loop exits. 
-func (m *Monitor) onShutdown() { - m.lk.RLock() - defer m.lk.RUnlock() - - for _, ch := range m.channels { - ch.Shutdown() - } -} - // onMonitoredChannelShutdown is called when a monitored channel shuts down func (m *Monitor) onMonitoredChannelShutdown(chid datatransfer.ChannelID) { m.lk.Lock() @@ -353,14 +342,6 @@ func (mc *monitoredChannel) resetConsecutiveRestarts() { mc.consecutiveRestarts = 0 } -// Used by the tests -func (mc *monitoredChannel) isRestarting() bool { - mc.restartLk.Lock() - defer mc.restartLk.Unlock() - - return !mc.restartedAt.IsZero() -} - // Send a restart message for the channel func (mc *monitoredChannel) restartChannel() { var restartedAt time.Time diff --git a/datatransfer/impl/events.go b/datatransfer/impl/events.go index f67d4e0c2..f00b76a7b 100644 --- a/datatransfer/impl/events.go +++ b/datatransfer/impl/events.go @@ -43,7 +43,7 @@ func (m *manager) OnChannelOpened(chid datatransfer.ChannelID) error { // message over the transport. func (m *manager) OnDataReceived(chid datatransfer.ChannelID, link ipld.Link, size uint64, index int64, unique bool) error { ctx, _ := m.spansIndex.SpanForChannel(context.TODO(), chid) - ctx, span := otel.Tracer("data-transfer").Start(ctx, "dataReceived", trace.WithAttributes( + _, span := otel.Tracer("data-transfer").Start(ctx, "dataReceived", trace.WithAttributes( attribute.String("channelID", chid.String()), attribute.String("link", link.String()), attribute.Int64("index", index), @@ -103,7 +103,7 @@ func (m *manager) OnDataQueued(chid datatransfer.ChannelID, link ipld.Link, size // machine. ctx, _ := m.spansIndex.SpanForChannel(context.TODO(), chid) - ctx, span := otel.Tracer("data-transfer").Start(ctx, "dataQueued", trace.WithAttributes( + _, span := otel.Tracer("data-transfer").Start(ctx, "dataQueued", trace.WithAttributes( attribute.String("channelID", chid.String()), attribute.String("link", link.String()), attribute.Int64("size", int64(size)), @@ -150,7 +150,7 @@ func (m *manager) OnDataQueued(chid datatransfer.ChannelID, link ipld.Link, size func (m *manager) OnDataSent(chid datatransfer.ChannelID, link ipld.Link, size uint64, index int64, unique bool) error { ctx, _ := m.spansIndex.SpanForChannel(context.TODO(), chid) - ctx, span := otel.Tracer("data-transfer").Start(ctx, "dataSent", trace.WithAttributes( + _, span := otel.Tracer("data-transfer").Start(ctx, "dataSent", trace.WithAttributes( attribute.String("channelID", chid.String()), attribute.String("link", link.String()), attribute.Int64("size", int64(size)), @@ -197,7 +197,7 @@ func (m *manager) OnRequestReceived(chid datatransfer.ChannelID, request datatra } func (m *manager) OnTransferQueued(chid datatransfer.ChannelID) { - m.channels.TransferRequestQueued(chid) + m.channels.TransferRequestQueued(chid) //nolint:errcheck } func (m *manager) OnResponseReceived(chid datatransfer.ChannelID, response datatransfer.Response) error { diff --git a/datatransfer/transport/graphsync/graphsync.go b/datatransfer/transport/graphsync/graphsync.go index 50b2d88b7..e2de26926 100644 --- a/datatransfer/transport/graphsync/graphsync.go +++ b/datatransfer/transport/graphsync/graphsync.go @@ -7,16 +7,14 @@ import ( "sync" "time" + graphsync "github.com/filecoin-project/boost-graphsync" + "github.com/filecoin-project/boost-graphsync/donotsendfirstblocks" datatransfer2 "github.com/filecoin-project/boost/datatransfer" "github.com/filecoin-project/boost/datatransfer/transport/graphsync/extension" logging "github.com/ipfs/go-log/v2" "github.com/ipld/go-ipld-prime" 
"github.com/libp2p/go-libp2p/core/peer" "golang.org/x/sync/errgroup" - "golang.org/x/xerrors" - - graphsync "github.com/filecoin-project/boost-graphsync" - "github.com/filecoin-project/boost-graphsync/donotsendfirstblocks" ) var log = logging.Logger("dt_graphsync") @@ -196,7 +194,7 @@ func (t *Transport) executeGsRequest(req *gsReq) { // Request cancelled by client if _, ok := lastError.(graphsync.RequestClientCancelledErr); ok { - terr := xerrors.Errorf("graphsync request cancelled") + terr := fmt.Errorf("graphsync request cancelled") log.Warnf("channel %s: %s", req.channelID, terr) if err := t.events.OnRequestCancelled(req.channelID, terr); err != nil { log.Error(err) @@ -219,7 +217,7 @@ func (t *Transport) executeGsRequest(req *gsReq) { var completeErr error if lastError != nil { - completeErr = xerrors.Errorf("channel %s: graphsync request failed to complete: %w", req.channelID, lastError) + completeErr = fmt.Errorf("channel %s: graphsync request failed to complete: %w", req.channelID, lastError) } // Used by the tests to listen for when a request completes @@ -265,7 +263,7 @@ func (t *Transport) CloseChannel(ctx context.Context, chid datatransfer2.Channel err = ch.close(ctx) if err != nil { - return xerrors.Errorf("closing channel: %w", err) + return fmt.Errorf("closing channel: %w", err) } return nil } @@ -330,7 +328,7 @@ func (t *Transport) Shutdown(ctx context.Context) error { err := eg.Wait() if err != nil { - return xerrors.Errorf("shutting down graphsync transport: %w", err) + return fmt.Errorf("shutting down graphsync transport: %w", err) } return nil } @@ -689,7 +687,7 @@ func (t *Transport) gsCompletedResponseListener(p peer.ID, request graphsync.Req var completeErr error if status != graphsync.RequestCompletedFull { statusStr := gsResponseStatusCodeString(status) - completeErr = xerrors.Errorf("graphsync response to peer %s did not complete: response status code %s", p, statusStr) + completeErr = fmt.Errorf("graphsync response to peer %s did not complete: response status code %s", p, statusStr) } // Used by the tests to listen for when a response completes @@ -828,7 +826,7 @@ func (t *Transport) gsRequestorCancelledListener(p peer.ID, request graphsync.Re ch, err := t.getDTChannel(chid) if err != nil { - if !xerrors.Is(datatransfer2.ErrChannelNotFound, err) { + if !errors.Is(datatransfer2.ErrChannelNotFound, err) { log.Errorf("requestor cancelled: getting channel %s: %s", chid, err) } return @@ -899,7 +897,7 @@ func (t *Transport) getDTChannel(chid datatransfer2.ChannelID) (*dtChannel, erro ch, ok := t.dtChannels[chid] if !ok { - return nil, xerrors.Errorf("channel %s: %w", chid, datatransfer2.ErrChannelNotFound) + return nil, fmt.Errorf("channel %s: %w", chid, datatransfer2.ErrChannelNotFound) } return ch, nil } @@ -953,17 +951,17 @@ func (c *dtChannel) open( // Wait for the complete callback to be called err := waitForCompleteHook(ctx, completed) if err != nil { - return nil, xerrors.Errorf("%s: waiting for cancelled graphsync request to complete: %w", chid, err) + return nil, fmt.Errorf("%s: waiting for cancelled graphsync request to complete: %w", chid, err) } // Wait for the cancel request method to complete select { case err = <-errch: case <-ctx.Done(): - err = xerrors.Errorf("timed out waiting for graphsync request to be cancelled") + err = fmt.Errorf("timed out waiting for graphsync request to be cancelled") } if err != nil { - return nil, xerrors.Errorf("%s: restarting graphsync request: %w", chid, err) + return nil, fmt.Errorf("%s: restarting graphsync request: 
%w", chid, err) } } @@ -1230,8 +1228,8 @@ func (c *dtChannel) cancel(ctx context.Context) chan error { err := c.t.gs.Cancel(ctx, *requestID) // Ignore "request not found" errors - if err != nil && !xerrors.Is(graphsync.RequestNotFoundErr{}, err) { - errch <- xerrors.Errorf("cancelling graphsync request for channel %s: %w", c.channelID, err) + if err != nil && !errors.Is(graphsync.RequestNotFoundErr{}, err) { + errch <- fmt.Errorf("cancelling graphsync request for channel %s: %w", c.channelID, err) } else { errch <- nil } @@ -1267,20 +1265,6 @@ func (m *requestIDToChannelIDMap) load(key graphsync.RequestID) (datatransfer2.C return val.channelID, ok } -// get the value if any of the keys exists in the map -func (m *requestIDToChannelIDMap) any(ks ...graphsync.RequestID) (datatransfer2.ChannelID, bool) { - m.lk.RLock() - defer m.lk.RUnlock() - - for _, k := range ks { - val, ok := m.m[k] - if ok { - return val.channelID, ok - } - } - return datatransfer2.ChannelID{}, false -} - // set the value for a key func (m *requestIDToChannelIDMap) set(key graphsync.RequestID, sending bool, chid datatransfer2.ChannelID) { m.lk.Lock() diff --git a/db/storageask.go b/db/storageask.go index 243b71039..cfadc05fa 100644 --- a/db/storageask.go +++ b/db/storageask.go @@ -29,9 +29,8 @@ func (s *StorageAskDB) Update(ctx context.Context, ask legacytypes.StorageAsk) e case err != nil: return err default: - s.update(ctx, ask) + return s.update(ctx, ask) } - return nil } func (s *StorageAskDB) set(ctx context.Context, ask legacytypes.StorageAsk) error { diff --git a/itests/disabled_markets_v1_deal_test.go b/itests/disabled_markets_v1_deal_test.go deleted file mode 100644 index 566504bb6..000000000 --- a/itests/disabled_markets_v1_deal_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package itests - -import ( - "context" - "testing" - - "github.com/filecoin-project/boost/itests/framework" - "github.com/filecoin-project/boost/testutil" - lapi "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/itests/kit" - "github.com/stretchr/testify/require" -) - -func TestDisabledMarketsV1Deal(t *testing.T) { - ctx := context.Background() - log := framework.Log - - kit.QuietMiningLogs() - framework.SetLogLevel() - var opts []framework.FrameworkOpts - opts = append(opts, framework.EnableLegacyDeals(false)) - f := framework.NewTestFramework(ctx, t, opts...) 
- err := f.Start() - require.NoError(t, err) - defer f.Stop() - - // Create a CAR file - log.Debugw("using tempdir", "dir", f.HomeDir) - rseed := 0 - size := 7 << 20 // 7MiB file - - inPath, err := testutil.CreateRandomFile(f.HomeDir, rseed, size) - require.NoError(t, err) - res, err := f.FullNode.ClientImport(ctx, lapi.FileRef{Path: inPath}) - require.NoError(t, err) - - // Create a new markets v1 deal - dp := f.DefaultMarketsV1DealParams() - dp.Data.Root = res.Root - - log.Debugw("starting deal", "root", res.Root) - dealProposalCid, err := f.FullNode.ClientStartDeal(ctx, &dp) - require.NoError(t, err) - - log.Debugw("got deal proposal cid", "cid", dealProposalCid) - di, err := f.FullNode.ClientGetDealInfo(ctx, *dealProposalCid) - require.NoError(t, err) - - log.Debugw(di.Message) - - err = f.WaitDealSealed(ctx, dealProposalCid) - require.ErrorContains(t, err, "protocol are deprecated") -} diff --git a/itests/framework/framework.go b/itests/framework/framework.go index e58788c25..e3c746ac2 100644 --- a/itests/framework/framework.go +++ b/itests/framework/framework.go @@ -14,19 +14,19 @@ import ( "github.com/filecoin-project/boost/api" boostclient "github.com/filecoin-project/boost/client" + "github.com/filecoin-project/boost/datatransfer" "github.com/filecoin-project/boost/node" "github.com/filecoin-project/boost/node/config" "github.com/filecoin-project/boost/node/modules/dtypes" "github.com/filecoin-project/boost/node/repo" + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" "github.com/filecoin-project/boost/storagemarket" "github.com/filecoin-project/boost/storagemarket/types" "github.com/filecoin-project/boost/storagemarket/types/dealcheckpoints" + "github.com/filecoin-project/boost/storagemarket/types/legacytypes" types2 "github.com/filecoin-project/boost/transport/types" "github.com/filecoin-project/go-address" cborutil "github.com/filecoin-project/go-cbor-util" - lotus_gfm_retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket" - gfm_storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket" - lotus_gfm_storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/builtin" @@ -35,7 +35,6 @@ import ( "github.com/filecoin-project/go-state-types/exitcode" lapi "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/v1api" - lbuild "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" chaintypes "github.com/filecoin-project/lotus/chain/types" ltypes "github.com/filecoin-project/lotus/chain/types" @@ -68,6 +67,7 @@ import ( "github.com/ipld/go-ipld-prime/traversal/selector" "github.com/libp2p/go-libp2p" "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" "github.com/multiformats/go-multihash" "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" @@ -658,18 +658,6 @@ func (f *TestFramework) signProposal(addr address.Address, proposal *market.Deal }, nil } -func (f *TestFramework) DefaultMarketsV1DealParams() lapi.StartDealParams { - return lapi.StartDealParams{ - Data: &lotus_gfm_storagemarket.DataRef{TransferType: gfm_storagemarket.TTGraphsync}, - EpochPrice: ltypes.NewInt(62500000), // minimum asking price - MinBlocksDuration: uint64(lbuild.MinDealDuration), - Miner: f.MinerAddr, - Wallet: f.DefaultWallet, - DealStartEpoch: 35000, - FastRetrieval: true, - } -} - func 
sendFunds(ctx context.Context, sender lapi.FullNode, recipient address.Address, amount abi.TokenAmount) error { senderAddr, err := sender.WalletDefaultAddress(ctx) if err != nil { @@ -738,14 +726,14 @@ func (f *TestFramework) WaitDealSealed(ctx context.Context, deal *cid.Cid) error } switch di.State { - case gfm_storagemarket.StorageDealAwaitingPreCommit, gfm_storagemarket.StorageDealSealing: - case gfm_storagemarket.StorageDealProposalRejected: + case legacytypes.StorageDealAwaitingPreCommit, legacytypes.StorageDealSealing: + case legacytypes.StorageDealProposalRejected: return errors.New("deal rejected") - case gfm_storagemarket.StorageDealFailing: + case legacytypes.StorageDealFailing: return errors.New("deal failed") - case gfm_storagemarket.StorageDealError: + case legacytypes.StorageDealError: return fmt.Errorf("deal errored: %s", di.Message) - case gfm_storagemarket.StorageDealActive: + case legacytypes.StorageDealActive: return nil } @@ -846,14 +834,14 @@ consumeEvents: continue } } - switch evt.Status { - case lotus_gfm_retrievalmarket.DealStatusCompleted: + switch legacyretrievaltypes.DealStatus(evt.Status) { + case legacyretrievaltypes.DealStatusCompleted: break consumeEvents - case lotus_gfm_retrievalmarket.DealStatusRejected: + case legacyretrievaltypes.DealStatusRejected: t.Fatalf("Retrieval Proposal Rejected: %s", evt.Message) case - lotus_gfm_retrievalmarket.DealStatusDealNotFound, - lotus_gfm_retrievalmarket.DealStatusErrored: + legacyretrievaltypes.DealStatusDealNotFound, + legacyretrievaltypes.DealStatusErrored: t.Fatalf("Retrieval Error: %s", evt.Message) } } @@ -876,3 +864,41 @@ consumeEvents: return ret } + +type RetrievalInfo struct { + PayloadCID cid.Cid + ID legacyretrievaltypes.DealID + PieceCID *cid.Cid + PricePerByte abi.TokenAmount + UnsealPrice abi.TokenAmount + + Status legacyretrievaltypes.DealStatus + Message string // more information about deal state, particularly errors + Provider peer.ID + BytesReceived uint64 + BytesPaidFor uint64 + TotalPaid abi.TokenAmount + + TransferChannelID *datatransfer.ChannelID + DataTransfer *DataTransferChannel + + // optional event if part of ClientGetRetrievalUpdates + Event *legacyretrievaltypes.ClientEvent +} + +type RestrievalRes struct { + DealID legacyretrievaltypes.DealID +} + +type DataTransferChannel struct { + TransferID datatransfer.TransferID + Status datatransfer.Status + BaseCID cid.Cid + IsInitiator bool + IsSender bool + Voucher string + Message string + OtherPeer peer.ID + Transferred uint64 + Stages *datatransfer.ChannelStages +} diff --git a/itests/markets_v1_identity_cid_test.go b/itests/graphsync_identity_cid_test.go similarity index 77% rename from itests/markets_v1_identity_cid_test.go rename to itests/graphsync_identity_cid_test.go index fac41b268..13cd37e58 100644 --- a/itests/markets_v1_identity_cid_test.go +++ b/itests/graphsync_identity_cid_test.go @@ -5,13 +5,16 @@ import ( "fmt" "math/rand" "os" + "path/filepath" "testing" + "time" + "github.com/davecgh/go-spew/spew" gstestutil "github.com/filecoin-project/boost-graphsync/testutil" "github.com/filecoin-project/boost/itests/framework" "github.com/filecoin-project/boost/testutil" - lapi "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/itests/kit" + "github.com/google/uuid" "github.com/ipfs/go-cid" "github.com/ipld/go-car/v2" "github.com/ipld/go-car/v2/storage" @@ -21,7 +24,7 @@ import ( cidlink "github.com/ipld/go-ipld-prime/linking/cid" "github.com/ipld/go-ipld-prime/node/basicnode" selectorparse 
"github.com/ipld/go-ipld-prime/traversal/selector/parse" - multihash "github.com/multiformats/go-multihash" + "github.com/multiformats/go-multihash" "github.com/stretchr/testify/require" ) @@ -40,7 +43,9 @@ func TestMarketsV1DealAndRetrievalWithIdentityCID(t *testing.T) { defer f.Stop() // Create a CAR file - carPath := f.HomeDir + "/testfile.car" + tempdir := t.TempDir() + log.Debugw("using tempdir", "dir", tempdir) + carPath := tempdir + "/testfile.car" log.Debugf("using test car %s", carPath) carFile, err := os.Create(carPath) req.NoError(err) @@ -82,29 +87,31 @@ func TestMarketsV1DealAndRetrievalWithIdentityCID(t *testing.T) { req.NoError(car.ReplaceRootsInFile(carPath, []cid.Cid{rootLink.(cidlink.Link).Cid})) log.Debugw("filled car, replaced root with correct root", "root", rootLink.String()) - // Import and make a deal to store + // Start a web server to serve the car files + log.Debug("starting webserver") + server, err := testutil.HttpTestFileServer(t, tempdir) + require.NoError(t, err) + defer server.Close() - res, err := f.FullNode.ClientImport(ctx, lapi.FileRef{Path: carPath, IsCAR: true}) - req.NoError(err) - - log.Debugw("imported data for deal") - - dp := f.DefaultMarketsV1DealParams() - dp.Data.Root = res.Root + // Create a new dummy deal + log.Debug("creating dummy deal") + dealUuid := uuid.New() + root := rootLink.(cidlink.Link).Cid - log.Debugw("starting deal", "root", res.Root) - dealProposalCid, err := f.FullNode.ClientStartDeal(ctx, &dp) - req.NoError(err) - - log.Debugw("got deal proposal cid", "cid", dealProposalCid) + // Make a deal + res, err := f.MakeDummyDeal(dealUuid, carPath, root, server.URL+"/"+filepath.Base(carPath), false) + require.NoError(t, err) + require.True(t, res.Result.Accepted) + log.Debugw("got response from MarketDummyDeal", "res", spew.Sdump(res)) + dealCid, err := res.DealParams.ClientDealProposal.Proposal.Cid() + require.NoError(t, err) - err = f.WaitDealSealed(ctx, dealProposalCid) - req.NoError(err) + time.Sleep(2 * time.Second) // Deal is stored and sealed, attempt different retrieval forms - log.Debugw("deal is sealed, starting retrieval", "cid", dealProposalCid, "root", res.Root) - outPath := f.Retrieve(ctx, t, dealProposalCid, rootLink.(cidlink.Link).Cid, false, selectorparse.CommonSelector_ExploreAllRecursively) + log.Debugw("deal is sealed, starting retrieval", "cid", dealCid.String(), "root", root.String()) + outPath := f.Retrieve(ctx, t, &dealCid, root, false, selectorparse.CommonSelector_ExploreAllRecursively) // Inspect what we got gotCids, err := testutil.CidsInCar(outPath) diff --git a/itests/markets_v1_retrieval_test.go b/itests/graphsync_retrieval_test.go similarity index 86% rename from itests/markets_v1_retrieval_test.go rename to itests/graphsync_retrieval_test.go index 44cd6c2c1..d09b096a9 100644 --- a/itests/markets_v1_retrieval_test.go +++ b/itests/graphsync_retrieval_test.go @@ -3,12 +3,15 @@ package itests import ( "context" "math" + "path/filepath" "testing" + "time" + "github.com/davecgh/go-spew/spew" "github.com/filecoin-project/boost/itests/framework" "github.com/filecoin-project/boost/testutil" - lapi "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/itests/kit" + "github.com/google/uuid" "github.com/ipfs/go-cid" "github.com/ipfs/go-unixfsnode" "github.com/ipld/go-ipld-prime/datamodel" @@ -31,11 +34,12 @@ func TestMarketsV1DealRetrieval(t *testing.T) { defer f.Stop() // Create a CAR file - log.Debugw("using tempdir", "dir", f.HomeDir) + tempdir := t.TempDir() + log.Debugw("using tempdir", 
"dir", tempdir) rseed := 0 size := 7 << 20 // 7MiB file - inPath, dirEnt := testutil.CreateRandomUnixfsFileInCar(t, f.HomeDir, rseed, size) + inPath, dirEnt := testutil.CreateRandomUnixfsFileInCar(t, tempdir, rseed, size) root := dirEnt.Root leaves := dirEnt.SelfCids[:len(dirEnt.SelfCids)-1] /* @@ -77,21 +81,29 @@ func TestMarketsV1DealRetrieval(t *testing.T) { bafkreifokzy5zcluf3hj23nkrvr7tx6sivpshkd4be5tpfibk6vm2mzlxy | RawLeaf | /0[7172032:7340031] (168000 B) */ - // Import and make a deal to store - - res, err := f.FullNode.ClientImport(ctx, lapi.FileRef{Path: inPath, IsCAR: true}) + // Start a web server to serve the car files + log.Debug("starting webserver") + server, err := testutil.HttpTestFileServer(t, tempdir) require.NoError(t, err) + defer server.Close() - dp := f.DefaultMarketsV1DealParams() - dp.Data.Root = res.Root + // Create a new dummy deal + log.Debug("creating dummy deal") + dealUuid := uuid.New() - log.Debugw("starting deal", "root", res.Root) - dealProposalCid, err := f.FullNode.ClientStartDeal(ctx, &dp) + // Make a deal + res, err := f.MakeDummyDeal(dealUuid, inPath, root, server.URL+"/"+filepath.Base(inPath), false) + require.NoError(t, err) + require.True(t, res.Result.Accepted) + log.Debugw("got response from MarketDummyDeal", "res", spew.Sdump(res)) + dealCid, err := res.DealParams.ClientDealProposal.Proposal.Cid() require.NoError(t, err) - log.Debugw("got deal proposal cid", "cid", dealProposalCid) + time.Sleep(2 * time.Second) + + log.Debugw("got deal proposal cid", "cid", dealCid.String()) - err = f.WaitDealSealed(ctx, dealProposalCid) + err = f.WaitDealSealed(ctx, &dealCid) require.NoError(t, err) // Deal is stored and sealed, attempt different retrieval forms @@ -165,8 +177,8 @@ func TestMarketsV1DealRetrieval(t *testing.T) { selNode = ss.Node() } - log.Debugw("deal is sealed, starting retrieval", "cid", dealProposalCid, "root", res.Root) - outPath := f.Retrieve(ctx, t, dealProposalCid, res.Root, false, selNode) + log.Debugw("deal is sealed, starting retrieval", "cid", dealCid.String(), "root", root) + outPath := f.Retrieve(ctx, t, &dealCid, root, false, selNode) // Inspect what we got gotCids, err := testutil.CidsInCar(outPath) diff --git a/markets/piecestore/impl/piecestore.go b/markets/piecestore/impl/piecestore.go index 74d9f2eae..12bc235d7 100644 --- a/markets/piecestore/impl/piecestore.go +++ b/markets/piecestore/impl/piecestore.go @@ -2,19 +2,19 @@ package piecestoreimpl import ( "context" + "errors" + "fmt" "github.com/filecoin-project/boost/markets/piecestore" "github.com/filecoin-project/boost/markets/piecestore/migrations" "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" + versioning "github.com/filecoin-project/go-ds-versioning/pkg" + versioned "github.com/filecoin-project/go-ds-versioning/pkg/statestore" "github.com/hannahhoward/go-pubsub" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/namespace" logging "github.com/ipfs/go-log/v2" - "golang.org/x/xerrors" - - versioning "github.com/filecoin-project/go-ds-versioning/pkg" - versioned "github.com/filecoin-project/go-ds-versioning/pkg/statestore" "github.com/filecoin-project/boost/markets/shared" ) @@ -146,8 +146,8 @@ func (ps *pieceStore) ListCidInfoKeys() ([]cid.Cid, error) { func (ps *pieceStore) GetPieceInfo(pieceCID cid.Cid) (piecestore.PieceInfo, error) { var out piecestore.PieceInfo if err := ps.pieces.Get(pieceCID).Get(&out); err != nil { - if xerrors.Is(err, datastore.ErrNotFound) { - return piecestore.PieceInfo{}, 
xerrors.Errorf("piece with CID %s: %w", pieceCID, legacyretrievaltypes.ErrNotFound) + if errors.Is(err, datastore.ErrNotFound) { + return piecestore.PieceInfo{}, fmt.Errorf("piece with CID %s: %w", pieceCID, legacyretrievaltypes.ErrNotFound) } return piecestore.PieceInfo{}, err } @@ -158,8 +158,8 @@ func (ps *pieceStore) GetPieceInfo(pieceCID cid.Cid) (piecestore.PieceInfo, erro func (ps *pieceStore) GetCIDInfo(payloadCID cid.Cid) (piecestore.CIDInfo, error) { var out piecestore.CIDInfo if err := ps.cidInfos.Get(payloadCID).Get(&out); err != nil { - if xerrors.Is(err, datastore.ErrNotFound) { - return piecestore.CIDInfo{}, xerrors.Errorf("payload CID %s: %w", payloadCID, legacyretrievaltypes.ErrNotFound) + if errors.Is(err, datastore.ErrNotFound) { + return piecestore.CIDInfo{}, fmt.Errorf("payload CID %s: %w", payloadCID, legacyretrievaltypes.ErrNotFound) } return piecestore.CIDInfo{}, err } diff --git a/markets/storageadapter/api.go b/markets/storageadapter/api.go deleted file mode 100644 index 922585825..000000000 --- a/markets/storageadapter/api.go +++ /dev/null @@ -1,49 +0,0 @@ -package storageadapter - -import ( - "context" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/blockstore" - "github.com/filecoin-project/lotus/chain/actors/adt" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/types" - cbor "github.com/ipfs/go-ipld-cbor" - "golang.org/x/xerrors" -) - -type apiWrapper struct { - api interface { - StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) - blockstore.ChainIO - } -} - -func (ca *apiWrapper) diffPreCommits(ctx context.Context, actor address.Address, pre, cur types.TipSetKey) (*miner.PreCommitChanges, error) { - store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(ca.api))) - - preAct, err := ca.api.StateGetActor(ctx, actor, pre) - if err != nil { - return nil, xerrors.Errorf("getting pre actor: %w", err) - } - curAct, err := ca.api.StateGetActor(ctx, actor, cur) - if err != nil { - return nil, xerrors.Errorf("getting cur actor: %w", err) - } - - preSt, err := miner.Load(store, preAct) - if err != nil { - return nil, xerrors.Errorf("loading miner actor: %w", err) - } - curSt, err := miner.Load(store, curAct) - if err != nil { - return nil, xerrors.Errorf("loading miner actor: %w", err) - } - - diff, err := miner.DiffPreCommits(preSt, curSt) - if err != nil { - return nil, xerrors.Errorf("diff precommits: %w", err) - } - - return diff, err -} diff --git a/markets/storageadapter/dealpublisher_test.go b/markets/storageadapter/dealpublisher_test.go index 7b2b182fc..0d01c1fd2 100644 --- a/markets/storageadapter/dealpublisher_test.go +++ b/markets/storageadapter/dealpublisher_test.go @@ -4,10 +4,12 @@ package storageadapter import ( "bytes" "context" + "fmt" "testing" "time" cborutil "github.com/filecoin-project/go-cbor-util" + blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" "github.com/raulk/clock" "github.com/stretchr/testify/require" @@ -483,3 +485,15 @@ func getWorkerActor(t *testing.T) address.Address { func getProviderActor(t *testing.T) address.Address { return tutils.NewActorAddr(t, "provider") } + +var seq int + +func generateCids(n int) []cid.Cid { + cids := make([]cid.Cid, 0, n) + for i := 0; i < n; i++ { + c := blocks.NewBlock([]byte(fmt.Sprint(seq))).Cid() + seq++ + cids = append(cids, c) + } + return cids +} diff --git a/node/builder.go b/node/builder.go index 4038421df..b60dcb3c4 
100644 --- a/node/builder.go +++ b/node/builder.go @@ -538,8 +538,6 @@ func ConfigBoost(cfg *config.Boost) Option { // Lotus Markets Override(new(dtypes.ProviderTransferNetwork), modules.NewProviderTransferNetwork), - Override(new(*modules.LinkSystemProv), modules.NewLinkSystemProvider), - Override(new(server.LinkSystemProvider), From(new(*modules.LinkSystemProv))), Override(new(*server.GraphsyncUnpaidRetrieval), modules.RetrievalGraphsync(cfg.LotusDealmaking.SimultaneousTransfersForStorage, cfg.LotusDealmaking.SimultaneousTransfersForStoragePerClient, cfg.LotusDealmaking.SimultaneousTransfersForRetrieval)), Override(new(dtypes.StagingGraphsync), From(new(*server.GraphsyncUnpaidRetrieval))), Override(StartPieceDoctorKey, modules.NewPieceDoctor), diff --git a/node/modules/graphsync.go b/node/modules/graphsync.go index 4fa8a274f..50fe7572c 100644 --- a/node/modules/graphsync.go +++ b/node/modules/graphsync.go @@ -18,25 +18,11 @@ import ( "github.com/filecoin-project/lotus/metrics" lotus_helpers "github.com/filecoin-project/lotus/node/modules/helpers" "github.com/ipfs/kubo/core/node/helpers" - "github.com/ipld/go-ipld-prime" "github.com/libp2p/go-libp2p/core/host" "go.opencensus.io/stats" "go.uber.org/fx" ) -// LinkSystemProv is used to avoid circular dependencies -type LinkSystemProv struct { - *ipld.LinkSystem -} - -func NewLinkSystemProvider() *LinkSystemProv { - return &LinkSystemProv{} -} - -func (p *LinkSystemProv) LinkSys() *ipld.LinkSystem { - return p.LinkSystem -} - // RetrievalGraphsync creates a graphsync instance used to serve retrievals. func RetrievalGraphsync(parallelTransfersForStorage uint64, parallelTransfersForStoragePerPeer uint64, parallelTransfersForRetrieval uint64) func(mctx lotus_helpers.MetricsCtx, lc fx.Lifecycle, pid *piecedirectory.PieceDirectory, h host.Host, net dtypes.ProviderTransferNetwork, dealDecider dtypes.RetrievalDealFilter, sa *lib.MultiMinerAccessor, askGetter *server.RetrievalAskGetter) (*server.GraphsyncUnpaidRetrieval, error) { return func(mctx lotus_helpers.MetricsCtx, lc fx.Lifecycle, pid *piecedirectory.PieceDirectory, h host.Host, net dtypes.ProviderTransferNetwork, dealDecider dtypes.RetrievalDealFilter, sa *lib.MultiMinerAccessor, askGetter *server.RetrievalAskGetter) (*server.GraphsyncUnpaidRetrieval, error) { diff --git a/retrievalmarket/server/gsunpaidretrieval.go b/retrievalmarket/server/gsunpaidretrieval.go index 30895e842..09c8704af 100644 --- a/retrievalmarket/server/gsunpaidretrieval.go +++ b/retrievalmarket/server/gsunpaidretrieval.go @@ -19,7 +19,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/hannahhoward/go-pubsub" logging "github.com/ipfs/go-log/v2" - "github.com/ipld/go-ipld-prime" "github.com/libp2p/go-libp2p/core/peer" cbg "github.com/whyrusleeping/cbor-gen" "go.opencensus.io/stats" @@ -39,10 +38,6 @@ type reqId struct { id datatransfer2.TransferID } -type LinkSystemProvider interface { - LinkSys() *ipld.LinkSystem -} - // GraphsyncUnpaidRetrieval intercepts incoming requests to Graphsync. // If the request is for a paid retrieval, it is forwarded to the existing // Graphsync implementation. 
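For illustration only, the interception behaviour described in the comment above (unpaid retrievals handled directly, paid retrievals forwarded to the existing Graphsync implementation) can be sketched with hypothetical stand-in types — these are not Boost's actual Graphsync or data-transfer interfaces:

package sketch

// RequestHandler is a hypothetical stand-in for a Graphsync request handler.
type RequestHandler interface {
	Handle(transferID uint64) error
}

// UnpaidInterceptor serves free (unpaid) retrievals itself and forwards
// everything else to the wrapped handler, mirroring the pattern the
// comment above describes.
type UnpaidInterceptor struct {
	IsUnpaid func(transferID uint64) bool
	Serve    RequestHandler // handles unpaid retrievals directly
	Next     RequestHandler // the existing Graphsync implementation
}

func (i *UnpaidInterceptor) Handle(transferID uint64) error {
	if i.IsUnpaid(transferID) {
		return i.Serve.Handle(transferID)
	}
	return i.Next.Handle(transferID)
}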
@@ -55,7 +50,6 @@ type GraphsyncUnpaidRetrieval struct { validator *requestValidator pubSubDT *pubsub.PubSub pubSubMkts *pubsub.PubSub - linkSystem LinkSystemProvider activeRetrievalsLk sync.RWMutex activeRetrievals map[reqId]*retrievalState diff --git a/retrievalmarket/server/gsunpaidretrieval_test.go b/retrievalmarket/server/gsunpaidretrieval_test.go index ae330ebe6..36f8ee4c4 100644 --- a/retrievalmarket/server/gsunpaidretrieval_test.go +++ b/retrievalmarket/server/gsunpaidretrieval_test.go @@ -1,31 +1,28 @@ package server import ( - "errors" "testing" - "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" - "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" ) -var tlog = logging.Logger("testgs") +//var tlog = logging.Logger("testgs") -type testCase struct { - name string - reqPayloadCid cid.Cid - watch func(client legacyretrievaltypes.RetrievalClient, gsupr *GraphsyncUnpaidRetrieval) - ask *legacyretrievaltypes.Ask - noUnsealedCopy bool - expectErr bool - expectClientCancelEvent bool - expectProviderCancelEvent bool - expectRejection string -} +//type testCase struct { +// name string +// reqPayloadCid cid.Cid +// watch func(client legacyretrievaltypes.RetrievalClient, gsupr *GraphsyncUnpaidRetrieval) +// ask *legacyretrievaltypes.Ask +// noUnsealedCopy bool +// expectErr bool +// expectClientCancelEvent bool +// expectProviderCancelEvent bool +// expectRejection string +//} -var providerCancelled = errors.New("provider cancelled") -var clientCancelled = errors.New("client cancelled") -var clientRejected = errors.New("client received reject response") +//var providerCancelled = errors.New("provider cancelled") +//var clientCancelled = errors.New("client cancelled") +//var clientRejected = errors.New("client received reject response") func TestGS(t *testing.T) { t.Skip("refactor tests to use boost client") diff --git a/storagemarket/storedask/storedask.go b/storagemarket/storedask/storedask.go index f9099952a..930b1a166 100644 --- a/storagemarket/storedask/storedask.go +++ b/storagemarket/storedask/storedask.go @@ -93,6 +93,8 @@ func signBytes(ctx context.Context, signer address.Address, b []byte, f api.Full return nil, err } + log.Debugf("signing the ask %s with address %s", string(b), signer.String()) + localSignature, err := f.WalletSign(ctx, signer, b) if err != nil { return nil, err diff --git a/storagemarket/types/legacytypes/filestore/filestore.go b/storagemarket/types/legacytypes/filestore/filestore.go index a9c802102..9334bc230 100644 --- a/storagemarket/types/legacytypes/filestore/filestore.go +++ b/storagemarket/types/legacytypes/filestore/filestore.go @@ -3,7 +3,6 @@ package filestore import ( "fmt" "io" - "io/ioutil" "os" "path" "path/filepath" @@ -62,7 +61,7 @@ func (fs fileStore) Delete(p Path) error { } func (fs fileStore) CreateTemp() (File, error) { - f, err := ioutil.TempFile(fs.base, "fstmp") + f, err := os.CreateTemp(fs.base, "fstmp") if err != nil { return nil, err } diff --git a/storagemarket/types/types.go b/storagemarket/types/types.go index c40ac08a3..d345504d7 100644 --- a/storagemarket/types/types.go +++ b/storagemarket/types/types.go @@ -18,12 +18,9 @@ import ( "github.com/filecoin-project/lotus/storage/sealer/storiface" "github.com/google/uuid" "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" "github.com/ipni/go-libipni/maurl" ) -var log = logging.Logger("boost-provider-types") - //go:generate cbor-gen-for --map-encoding StorageAsk DealParamsV120 DealParams Transfer DealResponse DealStatusRequest 
DealStatusResponse DealStatus //go:generate go run github.com/golang/mock/mockgen -destination=mock_types/mocks.go -package=mock_types . PieceAdder,CommpCalculator,DealPublisher,ChainDealManager,IndexProvider From e6f0a1809ebe8eebafbf1966be0dd21310c58002 Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Fri, 20 Oct 2023 18:27:22 +0400 Subject: [PATCH 06/34] convert RetrievalAskGetter to interface --- cmd/lib/stores/dagstore.go | 74 --------------------- markets/sectoraccessor/sectoraccessor.go | 4 +- retrievalmarket/server/gsunpaidretrieval.go | 12 ++-- retrievalmarket/types/types.go | 14 ++++ 4 files changed, 24 insertions(+), 80 deletions(-) create mode 100644 retrievalmarket/types/types.go diff --git a/cmd/lib/stores/dagstore.go b/cmd/lib/stores/dagstore.go index ffdb4204f..24c722ea1 100644 --- a/cmd/lib/stores/dagstore.go +++ b/cmd/lib/stores/dagstore.go @@ -1,86 +1,12 @@ package stores import ( - "context" "io" - "github.com/filecoin-project/boost/storagemarket/types/legacytypes" bstore "github.com/ipfs/boxo/blockstore" - "github.com/ipfs/go-cid" - carindex "github.com/ipld/go-car/v2/index" - - "github.com/filecoin-project/dagstore" ) type ClosableBlockstore interface { bstore.Blockstore io.Closer } - -// DAGStoreWrapper hides the details of the DAG store implementation from -// the other parts of go-fil-markets. -type DAGStoreWrapper interface { - // RegisterShard loads a CAR file into the DAG store and builds an - // index for it, sending the result on the supplied channel on completion - RegisterShard(ctx context.Context, pieceCid cid.Cid, carPath string, eagerInit bool, resch chan dagstore.ShardResult) error - - // LoadShard fetches the data for a shard and provides a blockstore - // interface to it. - // - // The blockstore must be closed to release the shard. - LoadShard(ctx context.Context, pieceCid cid.Cid) (ClosableBlockstore, error) - - // MigrateDeals migrates the supplied storage deals into the DAG store. - MigrateDeals(ctx context.Context, deals []legacytypes.MinerDeal) (bool, error) - - // GetPiecesContainingBlock returns the CID of all pieces that contain - // the block with the given CID - GetPiecesContainingBlock(blockCID cid.Cid) ([]cid.Cid, error) - - GetIterableIndexForPiece(pieceCid cid.Cid) (carindex.IterableIndex, error) - - // DestroyShard initiates the registration of a new shard. - // - // This method returns an error synchronously if preliminary validation fails. - // Otherwise, it queues the shard for destruction. The caller should monitor - // supplied channel for a result. - DestroyShard(ctx context.Context, pieceCid cid.Cid, resch chan dagstore.ShardResult) error - - // Close closes the dag store wrapper. - Close() error -} - -// RegisterShardSync calls the DAGStore RegisterShard method and waits -// synchronously in a dedicated channel until the registration has completed -// fully. -func RegisterShardSync(ctx context.Context, ds DAGStoreWrapper, pieceCid cid.Cid, carPath string, eagerInit bool) error { - resch := make(chan dagstore.ShardResult, 1) - if err := ds.RegisterShard(ctx, pieceCid, carPath, eagerInit, resch); err != nil { - return err - } - - // TODO: Can I rely on RegisterShard to return an error if the context times out? - select { - case <-ctx.Done(): - return ctx.Err() - case res := <-resch: - return res.Error - } -} - -// DestroyShardSync calls the DAGStore DestroyShard method and waits -// synchronously in a dedicated channel until the shard has been destroyed completely. 
-func DestroyShardSync(ctx context.Context, ds DAGStoreWrapper, pieceCid cid.Cid) error { - resch := make(chan dagstore.ShardResult, 1) - - if err := ds.DestroyShard(ctx, pieceCid, resch); err != nil { - return err - } - - select { - case <-ctx.Done(): - return ctx.Err() - case res := <-resch: - return res.Error - } -} diff --git a/markets/sectoraccessor/sectoraccessor.go b/markets/sectoraccessor/sectoraccessor.go index 9b709d3b5..b7e350f14 100644 --- a/markets/sectoraccessor/sectoraccessor.go +++ b/markets/sectoraccessor/sectoraccessor.go @@ -8,9 +8,9 @@ import ( logging "github.com/ipfs/go-log/v2" "golang.org/x/xerrors" + retrievalmarket_types "github.com/filecoin-project/boost/retrievalmarket/types" "github.com/filecoin-project/dagstore/mount" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api" @@ -32,7 +32,7 @@ type sectorAccessor struct { full v1api.FullNode } -var _ retrievalmarket.SectorAccessor = (*sectorAccessor)(nil) +var _ retrievalmarket_types.SectorAccessor = (*sectorAccessor)(nil) func NewSectorAccessor(maddr dtypes.MinerAddress, secb sectorblocks.SectorBuilder, pp sealer.PieceProvider, full v1api.FullNode) dagstore.SectorAccessor { return §orAccessor{address.Address(maddr), secb, pp, full} diff --git a/retrievalmarket/server/gsunpaidretrieval.go b/retrievalmarket/server/gsunpaidretrieval.go index 09c8704af..014d3d94e 100644 --- a/retrievalmarket/server/gsunpaidretrieval.go +++ b/retrievalmarket/server/gsunpaidretrieval.go @@ -66,16 +66,20 @@ var defaultExtensions = []graphsync.ExtensionName{ extension.ExtensionDataTransfer1_1, } -type RetrievalAskGetter struct { +type RetrievalAskGetter interface { + GetAsk() *legacyretrievaltypes.Ask +} + +type retrievalAskGetter struct { ask legacyretrievaltypes.Ask } -func (rag *RetrievalAskGetter) GetAsk() *legacyretrievaltypes.Ask { +func (rag *retrievalAskGetter) GetAsk() *legacyretrievaltypes.Ask { return &rag.ask } -func NewRetrievalAskGetter() *RetrievalAskGetter { - return &RetrievalAskGetter{ +func NewRetrievalAskGetter() *retrievalAskGetter { + return &retrievalAskGetter{ ask: legacyretrievaltypes.Ask{ PricePerByte: abi.NewTokenAmount(0), UnsealPrice: abi.NewTokenAmount(0), diff --git a/retrievalmarket/types/types.go b/retrievalmarket/types/types.go new file mode 100644 index 000000000..929615d81 --- /dev/null +++ b/retrievalmarket/types/types.go @@ -0,0 +1,14 @@ +package types + +import ( + "context" + "io" + + "github.com/filecoin-project/go-state-types/abi" +) + +// SectorAccessor provides methods to unseal and get the seal status of a sector +type SectorAccessor interface { + UnsealSector(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (io.ReadCloser, error) + IsUnsealed(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) +} From 0584b7f9ae0e943c2138b51bfdfa58e57e1fef68 Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Fri, 20 Oct 2023 18:32:50 +0400 Subject: [PATCH 07/34] fix interface pointer --- node/modules/graphsync.go | 4 ++-- retrievalmarket/server/gsunpaidretrieval.go | 2 +- retrievalmarket/server/validation.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/node/modules/graphsync.go b/node/modules/graphsync.go index 50fe7572c..106e270e1 100644 --- a/node/modules/graphsync.go +++ b/node/modules/graphsync.go @@ -24,8 +24,8 @@ import ( ) 
// RetrievalGraphsync creates a graphsync instance used to serve retrievals. -func RetrievalGraphsync(parallelTransfersForStorage uint64, parallelTransfersForStoragePerPeer uint64, parallelTransfersForRetrieval uint64) func(mctx lotus_helpers.MetricsCtx, lc fx.Lifecycle, pid *piecedirectory.PieceDirectory, h host.Host, net dtypes.ProviderTransferNetwork, dealDecider dtypes.RetrievalDealFilter, sa *lib.MultiMinerAccessor, askGetter *server.RetrievalAskGetter) (*server.GraphsyncUnpaidRetrieval, error) { - return func(mctx lotus_helpers.MetricsCtx, lc fx.Lifecycle, pid *piecedirectory.PieceDirectory, h host.Host, net dtypes.ProviderTransferNetwork, dealDecider dtypes.RetrievalDealFilter, sa *lib.MultiMinerAccessor, askGetter *server.RetrievalAskGetter) (*server.GraphsyncUnpaidRetrieval, error) { +func RetrievalGraphsync(parallelTransfersForStorage uint64, parallelTransfersForStoragePerPeer uint64, parallelTransfersForRetrieval uint64) func(mctx lotus_helpers.MetricsCtx, lc fx.Lifecycle, pid *piecedirectory.PieceDirectory, h host.Host, net dtypes.ProviderTransferNetwork, dealDecider dtypes.RetrievalDealFilter, sa *lib.MultiMinerAccessor, askGetter server.RetrievalAskGetter) (*server.GraphsyncUnpaidRetrieval, error) { + return func(mctx lotus_helpers.MetricsCtx, lc fx.Lifecycle, pid *piecedirectory.PieceDirectory, h host.Host, net dtypes.ProviderTransferNetwork, dealDecider dtypes.RetrievalDealFilter, sa *lib.MultiMinerAccessor, askGetter server.RetrievalAskGetter) (*server.GraphsyncUnpaidRetrieval, error) { // Graphsync tracks metrics separately, pass nil blockMetrics to the remote blockstore rb := remoteblockstore.NewRemoteBlockstore(pid, nil) diff --git a/retrievalmarket/server/gsunpaidretrieval.go b/retrievalmarket/server/gsunpaidretrieval.go index 014d3d94e..e25f9548c 100644 --- a/retrievalmarket/server/gsunpaidretrieval.go +++ b/retrievalmarket/server/gsunpaidretrieval.go @@ -91,7 +91,7 @@ type ValidationDeps struct { DealDecider DealDecider PieceDirectory *piecedirectory.PieceDirectory SectorAccessor SectorAccessor - AskStore *RetrievalAskGetter + AskStore RetrievalAskGetter } func NewGraphsyncUnpaidRetrieval(peerID peer.ID, gs graphsync.GraphExchange, dtnet network.DataTransferNetwork, vdeps ValidationDeps) (*GraphsyncUnpaidRetrieval, error) { diff --git a/retrievalmarket/server/validation.go b/retrievalmarket/server/validation.go index 662d1f712..8b9c521a1 100644 --- a/retrievalmarket/server/validation.go +++ b/retrievalmarket/server/validation.go @@ -113,7 +113,7 @@ func (rv *requestValidator) acceptDeal(receiver peer.ID, proposal *legacyretriev // Check if the piece is unsealed _, isUnsealed, err := rv.getPiece(proposal.PayloadCID, proposal.PieceCID) if err != nil { - if err == legacyretrievaltypes.ErrNotFound { + if errors.Is(err, legacyretrievaltypes.ErrNotFound) { return fmt.Errorf("there is no piece containing payload cid %s: %w", proposal.PayloadCID, err) } return err From 03e2cb6baef00efba3d85cf546a06f61986da4a8 Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Fri, 20 Oct 2023 21:32:41 +0400 Subject: [PATCH 08/34] fix dependency injection, move ask db --- gql/module.go | 4 +- gql/resolver.go | 4 +- node/builder.go | 16 ++----- node/modules/storageminer.go | 18 ++++---- storagemarket/storedask/create_ask_db.sql | 10 +++++ .../storedask/db.go | 43 ++++++++++++++++--- storagemarket/storedask/storedask.go | 37 ++++++++++------ 7 files changed, 87 insertions(+), 45 deletions(-) create mode 100644 storagemarket/storedask/create_ask_db.sql rename db/storageask.go => 
storagemarket/storedask/db.go (66%) diff --git a/gql/module.go b/gql/module.go index 139e3f81d..c4e1134f1 100644 --- a/gql/module.go +++ b/gql/module.go @@ -27,12 +27,12 @@ import ( "go.uber.org/fx" ) -func NewGraphqlServer(cfg *config.Boost) func(lc fx.Lifecycle, r repo.LockedRepo, h host.Host, prov *storagemarket.Provider, dealsDB *db.DealsDB, logsDB *db.LogsDB, retDB *rtvllog.RetrievalLogDB, plDB *db.ProposalLogsDB, fundsDB *db.FundsDB, fundMgr *fundmanager.FundManager, storageMgr *storagemanager.StorageManager, publisher *storageadapter.DealPublisher, spApi sealingpipeline.API, legacyDeals legacy.LegacyDealManager, piecedirectory *piecedirectory.PieceDirectory, indexProv provider.Interface, idxProvWrapper *indexprovider.Wrapper, fullNode v1api.FullNode, bg BlockGetter, ssm *sectorstatemgr.SectorStateMgr, mpool *mpoolmonitor.MpoolMonitor, mma *lib.MultiMinerAccessor, sask *storedask.StoredAsk) *Server { +func NewGraphqlServer(cfg *config.Boost) func(lc fx.Lifecycle, r repo.LockedRepo, h host.Host, prov *storagemarket.Provider, dealsDB *db.DealsDB, logsDB *db.LogsDB, retDB *rtvllog.RetrievalLogDB, plDB *db.ProposalLogsDB, fundsDB *db.FundsDB, fundMgr *fundmanager.FundManager, storageMgr *storagemanager.StorageManager, publisher *storageadapter.DealPublisher, spApi sealingpipeline.API, legacyDeals legacy.LegacyDealManager, piecedirectory *piecedirectory.PieceDirectory, indexProv provider.Interface, idxProvWrapper *indexprovider.Wrapper, fullNode v1api.FullNode, bg BlockGetter, ssm *sectorstatemgr.SectorStateMgr, mpool *mpoolmonitor.MpoolMonitor, mma *lib.MultiMinerAccessor, sask storedask.StoredAsk) *Server { return func(lc fx.Lifecycle, r repo.LockedRepo, h host.Host, prov *storagemarket.Provider, dealsDB *db.DealsDB, logsDB *db.LogsDB, retDB *rtvllog.RetrievalLogDB, plDB *db.ProposalLogsDB, fundsDB *db.FundsDB, fundMgr *fundmanager.FundManager, storageMgr *storagemanager.StorageManager, publisher *storageadapter.DealPublisher, spApi sealingpipeline.API, legacyDeals legacy.LegacyDealManager, piecedirectory *piecedirectory.PieceDirectory, indexProv provider.Interface, idxProvWrapper *indexprovider.Wrapper, fullNode v1api.FullNode, bg BlockGetter, - ssm *sectorstatemgr.SectorStateMgr, mpool *mpoolmonitor.MpoolMonitor, mma *lib.MultiMinerAccessor, sask *storedask.StoredAsk) *Server { + ssm *sectorstatemgr.SectorStateMgr, mpool *mpoolmonitor.MpoolMonitor, mma *lib.MultiMinerAccessor, sask storedask.StoredAsk) *Server { resolverCtx, cancel := context.WithCancel(context.Background()) resolver := NewResolver(resolverCtx, cfg, r, h, dealsDB, logsDB, retDB, plDB, fundsDB, fundMgr, storageMgr, spApi, prov, legacyDeals, piecedirectory, publisher, indexProv, idxProvWrapper, fullNode, ssm, mpool, mma, sask) diff --git a/gql/resolver.go b/gql/resolver.go index b22c140e9..bf95ee0ad 100644 --- a/gql/resolver.go +++ b/gql/resolver.go @@ -72,10 +72,10 @@ type resolver struct { fullNode v1api.FullNode mpool *mpoolmonitor.MpoolMonitor mma *lib.MultiMinerAccessor - askProv *storedask.StoredAsk + askProv storedask.StoredAsk } -func NewResolver(ctx context.Context, cfg *config.Boost, r lotus_repo.LockedRepo, h host.Host, dealsDB *db.DealsDB, logsDB *db.LogsDB, retDB *rtvllog.RetrievalLogDB, plDB *db.ProposalLogsDB, fundsDB *db.FundsDB, fundMgr *fundmanager.FundManager, storageMgr *storagemanager.StorageManager, spApi sealingpipeline.API, provider *storagemarket.Provider, legacyDeals legacy.LegacyDealManager, piecedirectory *piecedirectory.PieceDirectory, publisher *storageadapter.DealPublisher, indexProv 
provider.Interface, idxProvWrapper *indexprovider.Wrapper, fullNode v1api.FullNode, ssm *sectorstatemgr.SectorStateMgr, mpool *mpoolmonitor.MpoolMonitor, mma *lib.MultiMinerAccessor, assk *storedask.StoredAsk) *resolver { +func NewResolver(ctx context.Context, cfg *config.Boost, r lotus_repo.LockedRepo, h host.Host, dealsDB *db.DealsDB, logsDB *db.LogsDB, retDB *rtvllog.RetrievalLogDB, plDB *db.ProposalLogsDB, fundsDB *db.FundsDB, fundMgr *fundmanager.FundManager, storageMgr *storagemanager.StorageManager, spApi sealingpipeline.API, provider *storagemarket.Provider, legacyDeals legacy.LegacyDealManager, piecedirectory *piecedirectory.PieceDirectory, publisher *storageadapter.DealPublisher, indexProv provider.Interface, idxProvWrapper *indexprovider.Wrapper, fullNode v1api.FullNode, ssm *sectorstatemgr.SectorStateMgr, mpool *mpoolmonitor.MpoolMonitor, mma *lib.MultiMinerAccessor, assk storedask.StoredAsk) *resolver { return &resolver{ ctx: ctx, cfg: cfg, diff --git a/node/builder.go b/node/builder.go index b60dcb3c4..9325283e1 100644 --- a/node/builder.go +++ b/node/builder.go @@ -49,7 +49,6 @@ import ( "github.com/filecoin-project/lotus/journal/alerting" _ "github.com/filecoin-project/lotus/lib/sigs/bls" _ "github.com/filecoin-project/lotus/lib/sigs/secp" - mdagstore "github.com/filecoin-project/lotus/markets/dagstore" lotus_config "github.com/filecoin-project/lotus/node/config" lotus_common "github.com/filecoin-project/lotus/node/impl/common" lotus_net "github.com/filecoin-project/lotus/node/impl/net" @@ -151,6 +150,7 @@ const ( HandleMigrateProviderFundsKey HandleDealsKey HandleCreateRetrievalTablesKey + HandleCreateAskTablesKey HandleSetShardSelector HandleSetRetrievalAskGetter HandleRetrievalEventsKey @@ -433,7 +433,7 @@ var BoostNode = Options( Override(new(*db.ProposalLogsDB), modules.NewProposalLogsDB), Override(new(*db.FundsDB), modules.NewFundsDB), Override(new(*db.SectorStateDB), modules.NewSectorStateDB), - Override(new(*db.StorageAskDB), modules.NewAskDB), + Override(new(*storedask.StorageAskDB), storedask.NewStorageAskDB), Override(new(*rtvllog.RetrievalLogDB), modules.NewRetrievalLogDB), ) @@ -514,6 +514,7 @@ func ConfigBoost(cfg *config.Boost) Option { Override(new(*sectorstatemgr.SectorStateMgr), sectorstatemgr.NewSectorStateMgr(cfg)), Override(new(*indexprovider.Wrapper), indexprovider.NewWrapper(cfg)), + Override(new(storedask.StoredAsk), storedask.NewStoredAsk(cfg)), Override(new(legacy.LegacyDealManager), modules.NewLegacyDealsManager), Override(new(*storagemarket.ChainDealManager), modules.NewChainDealManager), @@ -534,10 +535,9 @@ func ConfigBoost(cfg *config.Boost) Option { DealPublishControl: []string{cfg.Wallets.PublishStorageDeals}, })), - Override(new(smtypes.AskGetter), storedask.NewStoredAsk(cfg)), - // Lotus Markets Override(new(dtypes.ProviderTransferNetwork), modules.NewProviderTransferNetwork), + Override(new(server.RetrievalAskGetter), server.NewRetrievalAskGetter), Override(new(*server.GraphsyncUnpaidRetrieval), modules.RetrievalGraphsync(cfg.LotusDealmaking.SimultaneousTransfersForStorage, cfg.LotusDealmaking.SimultaneousTransfersForStoragePerClient, cfg.LotusDealmaking.SimultaneousTransfersForRetrieval)), Override(new(dtypes.StagingGraphsync), From(new(*server.GraphsyncUnpaidRetrieval))), Override(StartPieceDoctorKey, modules.NewPieceDoctor), @@ -545,13 +545,6 @@ func ConfigBoost(cfg *config.Boost) Option { // Lotus Markets (retrieval deps) Override(new(sealer.PieceProvider), sealer.NewPieceProvider), - // DAG Store - - // TODO: Not sure how to 
completely get rid of these yet: - // Error: creating node: starting node: missing dependencies for function "reflect".makeFuncStub (/usr/local/go/src/reflect/asm_amd64.s:30): missing types: *dagstore.DAGStore; *dagstore.Wrapper (did you mean stores.DAGStoreWrapper?) - Override(new(*dagstore.DAGStore), func() *dagstore.DAGStore { return nil }), - Override(new(*mdagstore.Wrapper), func() *mdagstore.Wrapper { return nil }), - Override(new(*bdclient.Store), modules.NewPieceDirectoryStore(cfg)), Override(new(*lib.MultiMinerAccessor), modules.NewMultiminerSectorAccessor(cfg)), Override(new(*piecedirectory.PieceDirectory), modules.NewPieceDirectory(cfg)), @@ -559,7 +552,6 @@ func ConfigBoost(cfg *config.Boost) Option { // Lotus Markets (retrieval) Override(new(server.SectorAccessor), modules.NewSectorAccessor(cfg)), - Override(HandleSetRetrievalAskGetter, server.NewRetrievalAskGetter), Override(HandleRetrievalEventsKey, modules.HandleRetrievalGraphsyncUpdates(time.Duration(cfg.Dealmaking.RetrievalLogDuration), time.Duration(cfg.Dealmaking.StalledRetrievalTimeout))), Override(HandleRetrievalAskKey, modules.HandleQueryAsk), Override(new(*lp2pimpl.TransportsListener), modules.NewTransportsListener(cfg)), diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index d5443a77d..e8214ba6a 100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -297,10 +297,6 @@ func NewFundsDB(sqldb *sql.DB) *db.FundsDB { return db.NewFundsDB(sqldb) } -func NewAskDB(sqldb *sql.DB) *db.StorageAskDB { - return db.NewStorageAskDB(sqldb) -} - func HandleQueryAsk(lc fx.Lifecycle, h host.Host, maddr lotus_dtypes.MinerAddress, pd *piecedirectory.PieceDirectory, sa *lib.MultiMinerAccessor, askStore server.RetrievalAskGetter, full v1api.FullNode) { handler := server.NewQueryAskHandler(h, address.Address(maddr), pd, sa, askStore, full) lc.Append(fx.Hook{ @@ -465,9 +461,9 @@ func NewLegacyDealsManager(lc fx.Lifecycle, legacyFSM fsm.Group) legacy.LegacyDe return mgr } -func NewStorageMarketProvider(provAddr address.Address, cfg *config.Boost) func(lc fx.Lifecycle, h host.Host, a v1api.FullNode, sqldb *sql.DB, dealsDB *db.DealsDB, fundMgr *fundmanager.FundManager, storageMgr *storagemanager.StorageManager, sask *storedask.StoredAsk, dp *storageadapter.DealPublisher, secb *sectorblocks.SectorBlocks, commpc types.CommpCalculator, sps sealingpipeline.API, df dtypes.StorageDealFilter, logsSqlDB *LogSqlDB, logsDB *db.LogsDB, piecedirectory *piecedirectory.PieceDirectory, ip *indexprovider.Wrapper, cdm *storagemarket.ChainDealManager) (*storagemarket.Provider, error) { +func NewStorageMarketProvider(provAddr address.Address, cfg *config.Boost) func(lc fx.Lifecycle, h host.Host, a v1api.FullNode, sqldb *sql.DB, dealsDB *db.DealsDB, fundMgr *fundmanager.FundManager, storageMgr *storagemanager.StorageManager, sask storedask.StoredAsk, dp *storageadapter.DealPublisher, secb *sectorblocks.SectorBlocks, commpc types.CommpCalculator, sps sealingpipeline.API, df dtypes.StorageDealFilter, logsSqlDB *LogSqlDB, logsDB *db.LogsDB, piecedirectory *piecedirectory.PieceDirectory, ip *indexprovider.Wrapper, cdm *storagemarket.ChainDealManager) (*storagemarket.Provider, error) { return func(lc fx.Lifecycle, h host.Host, a v1api.FullNode, sqldb *sql.DB, dealsDB *db.DealsDB, - fundMgr *fundmanager.FundManager, storageMgr *storagemanager.StorageManager, sask *storedask.StoredAsk, dp *storageadapter.DealPublisher, secb *sectorblocks.SectorBlocks, + fundMgr *fundmanager.FundManager, storageMgr 
*storagemanager.StorageManager, sask storedask.StoredAsk, dp *storageadapter.DealPublisher, secb *sectorblocks.SectorBlocks, commpc types.CommpCalculator, sps sealingpipeline.API, df dtypes.StorageDealFilter, logsSqlDB *LogSqlDB, logsDB *db.LogsDB, piecedirectory *piecedirectory.PieceDirectory, ip *indexprovider.Wrapper, cdm *storagemarket.ChainDealManager) (*storagemarket.Provider, error) { @@ -552,10 +548,14 @@ func NewMpoolMonitor(cfg *config.Boost) func(lc fx.Lifecycle, a v1api.FullNode) } } -func NewLegacyDealsFSM(cfg *config.Boost) func(lc fx.Lifecycle, ds *backupds.Datastore) (fsm.Group, error) { - return func(lc fx.Lifecycle, ds *backupds.Datastore) (fsm.Group, error) { +func NewLegacyDealsFSM(cfg *config.Boost) func(lc fx.Lifecycle, mds lotus_dtypes.MetadataDS) (fsm.Group, error) { + return func(lc fx.Lifecycle, mds lotus_dtypes.MetadataDS) (fsm.Group, error) { // Get the deals FSM - provDS := namespace.Wrap(ds, datastore.NewKey("/deals/provider")) + bds, err := backupds.Wrap(mds, "") + if err != nil { + return nil, fmt.Errorf("opening backupds: %w", err) + } + provDS := namespace.Wrap(bds, datastore.NewKey("/deals/provider")) deals, migrate, err := vfsm.NewVersionedFSM(provDS, fsm.Parameters{ StateType: legacytypes.MinerDeal{}, StateKeyField: "State", diff --git a/storagemarket/storedask/create_ask_db.sql b/storagemarket/storedask/create_ask_db.sql new file mode 100644 index 000000000..11b48a358 --- /dev/null +++ b/storagemarket/storedask/create_ask_db.sql @@ -0,0 +1,10 @@ +CREATE TABLE IF NOT EXISTS StorageAsk ( + Price INT, + VerifiedPrice INT, + MinPieceSize INT, + MaxPieceSize INT, + Miner Text, + TS INT, + Expiry INT, + SeqNo INT +); \ No newline at end of file diff --git a/db/storageask.go b/storagemarket/storedask/db.go similarity index 66% rename from db/storageask.go rename to storagemarket/storedask/db.go index cfadc05fa..bcabc3f4d 100644 --- a/db/storageask.go +++ b/storagemarket/storedask/db.go @@ -1,21 +1,52 @@ -package db +package storedask import ( "context" "database/sql" + _ "embed" + "errors" "fmt" + "path" + "github.com/filecoin-project/boost/db" "github.com/filecoin-project/boost/storagemarket/types/legacytypes" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + lotus_repo "github.com/filecoin-project/lotus/node/repo" + "go.uber.org/fx" ) +const AskDBName = "ask.db" + +//go:embed create_ask_db.sql +var createAskDBSQL string + +func createAskTable(ctx context.Context, askDB *sql.DB) error { + if _, err := askDB.ExecContext(ctx, createAskDBSQL); err != nil { + return fmt.Errorf("failed to create tables in ask DB: %w", err) + } + return nil +} + type StorageAskDB struct { db *sql.DB } -func NewStorageAskDB(db *sql.DB) *StorageAskDB { - return &StorageAskDB{db: db} +func NewStorageAskDB(r lotus_repo.LockedRepo) (*StorageAskDB, error) { + dbPath := path.Join(r.Path(), AskDBName+"?cache=shared") + d, err := db.SqlDB(dbPath) + if err != nil { + return nil, err + } + return &StorageAskDB{db: d}, nil +} + +func CreateAskTables(lc fx.Lifecycle, db *StorageAskDB) { + lc.Append(fx.Hook{ + OnStart: func(ctx context.Context) error { + return createAskTable(ctx, db.db) + }, + }) } func (s *StorageAskDB) Update(ctx context.Context, ask legacytypes.StorageAsk) error { @@ -24,7 +55,7 @@ func (s *StorageAskDB) Update(ctx context.Context, ask legacytypes.StorageAsk) e row := s.db.QueryRowContext(ctx, qry, ask.Miner.String()) err := row.Scan(&minerString) switch { - case err == sql.ErrNoRows: + case errors.Is(err, sql.ErrNoRows): return 
s.set(ctx, ask) case err != nil: return err @@ -36,7 +67,7 @@ func (s *StorageAskDB) Update(ctx context.Context, ask legacytypes.StorageAsk) e func (s *StorageAskDB) set(ctx context.Context, ask legacytypes.StorageAsk) error { qry := "INSERT INTO StorageAsk (Price, VerifiedPrice, MinPieceSize, MaxPieceSize, Miner, TS, Expiry, SeqNo) " qry += "VALUES (?, ?, ?, ?, ?, ?, ?, ?)" - values := []interface{}{ask.Price, ask.VerifiedPrice, ask.MinPieceSize, ask.MaxPieceSize, ask.Miner.String(), ask.Timestamp, ask.Expiry, ask.SeqNo} + values := []interface{}{ask.Price.Int64(), ask.VerifiedPrice.Int64(), ask.MinPieceSize, ask.MaxPieceSize, ask.Miner.String(), ask.Timestamp, ask.Expiry, ask.SeqNo} _, err := s.db.ExecContext(ctx, qry, values...) return err } @@ -45,7 +76,7 @@ func (s *StorageAskDB) update(ctx context.Context, ask legacytypes.StorageAsk) e qry := "UPDATE StorageAsk (Price, VerifiedPrice, MinPieceSize, MaxPieceSize, TS, Expiry, SeqNo) " qry += "VALUES (?, ?, ?, ?, ?, ?, ?, ?) " qry += "WHERE Miner=?" - values := []interface{}{ask.Price, ask.VerifiedPrice, ask.MinPieceSize, ask.MaxPieceSize, ask.Timestamp, ask.Expiry, ask.SeqNo, ask.Miner.String()} + values := []interface{}{ask.Price.Int64(), ask.VerifiedPrice.Int64(), ask.MinPieceSize, ask.MaxPieceSize, ask.Timestamp, ask.Expiry, ask.SeqNo, ask.Miner.String()} _, err := s.db.ExecContext(ctx, qry, values...) return err } diff --git a/storagemarket/storedask/storedask.go b/storagemarket/storedask/storedask.go index 930b1a166..54ba345f1 100644 --- a/storagemarket/storedask/storedask.go +++ b/storagemarket/storedask/storedask.go @@ -7,7 +7,6 @@ import ( "fmt" "sync" - "github.com/filecoin-project/boost/db" "github.com/filecoin-project/boost/markets/shared" "github.com/filecoin-project/boost/node/config" "github.com/filecoin-project/boost/storagemarket/types/legacytypes" @@ -40,26 +39,36 @@ const DefaultMinPieceSize abi.PaddedPieceSize = 256 // TODO: It would be nice to default this to the miner's sector size const DefaultMaxPieceSize abi.PaddedPieceSize = 1 << 20 -type StoredAsk struct { +type StoredAsk interface { + GetAsk(miner address.Address) *legacytypes.SignedStorageAsk + SetAsk(ctx context.Context, price abi.TokenAmount, verifiedPrice abi.TokenAmount, duration abi.ChainEpoch, miner address.Address, options ...legacytypes.StorageAskOption) error +} + +type storedAsk struct { askLk sync.RWMutex asks map[address.Address]*legacytypes.SignedStorageAsk fullNode api.FullNode - db *db.StorageAskDB + db *StorageAskDB } // NewStoredAsk returns a new instance of StoredAsk // It will initialize a new SignedStorageAsk on disk if one is not set // Otherwise it loads the current SignedStorageAsk from disk -func NewStoredAsk(cfg *config.Boost) func(lc fx.Lifecycle, db *db.StorageAskDB, fullNode api.FullNode) (*StoredAsk, error) { - return func(lc fx.Lifecycle, db *db.StorageAskDB, fullNode api.FullNode) (*StoredAsk, error) { - s := &StoredAsk{ +func NewStoredAsk(cfg *config.Boost) func(lc fx.Lifecycle, askdb *StorageAskDB, fullNode api.FullNode) (*storedAsk, error) { + return func(lc fx.Lifecycle, askdb *StorageAskDB, fullNode api.FullNode) (*storedAsk, error) { + ctx := context.Background() + + err := createAskTable(ctx, askdb.db) + if err != nil { + return nil, err + } + + s := &storedAsk{ fullNode: fullNode, - db: db, + db: askdb, asks: make(map[address.Address]*legacytypes.SignedStorageAsk), } - ctx := context.Background() - var minerIDs []address.Address miner, err := address.NewFromString(cfg.Wallets.Miner) if err != nil { @@ -115,7 +124,7 @@ 
func getMinerWorkerAddress(ctx context.Context, maddr address.Address, tok share return mi.Worker, nil } -func (s *StoredAsk) sign(ctx context.Context, ask *legacytypes.StorageAsk) (*crypto.Signature, error) { +func (s *storedAsk) sign(ctx context.Context, ask *legacytypes.StorageAsk) (*crypto.Signature, error) { tok, err := s.fullNode.ChainHead(ctx) if err != nil { return nil, err @@ -143,14 +152,14 @@ func signMinerData(ctx context.Context, data interface{}, address address.Addres return sig, nil } -func (s *StoredAsk) GetAsk(miner address.Address) *legacytypes.SignedStorageAsk { +func (s *storedAsk) GetAsk(miner address.Address) *legacytypes.SignedStorageAsk { s.askLk.RLock() defer s.askLk.RUnlock() return s.asks[miner] } -func (s *StoredAsk) SetAsk(ctx context.Context, price abi.TokenAmount, verifiedPrice abi.TokenAmount, duration abi.ChainEpoch, miner address.Address, options ...legacytypes.StorageAskOption) error { +func (s *storedAsk) SetAsk(ctx context.Context, price abi.TokenAmount, verifiedPrice abi.TokenAmount, duration abi.ChainEpoch, miner address.Address, options ...legacytypes.StorageAskOption) error { s.askLk.Lock() defer s.askLk.Unlock() var seqno uint64 @@ -196,7 +205,7 @@ func (s *StoredAsk) SetAsk(ctx context.Context, price abi.TokenAmount, verifiedP } -func (s *StoredAsk) getSignedAsk(ctx context.Context, miner address.Address) (legacytypes.SignedStorageAsk, error) { +func (s *storedAsk) getSignedAsk(ctx context.Context, miner address.Address) (legacytypes.SignedStorageAsk, error) { ask, err := s.db.Get(ctx, miner) if err != nil { return legacytypes.SignedStorageAsk{}, err @@ -212,6 +221,6 @@ func (s *StoredAsk) getSignedAsk(ctx context.Context, miner address.Address) (le }, nil } -func (s *StoredAsk) storeAsk(ctx context.Context, ask legacytypes.StorageAsk) error { +func (s *storedAsk) storeAsk(ctx context.Context, ask legacytypes.StorageAsk) error { return s.db.Update(ctx, ask) } From fe32d7bf9c88410a1081298e8b46cd680d2c723b Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Mon, 23 Oct 2023 16:07:45 +0400 Subject: [PATCH 09/34] fix itests --- .circleci/config.yml | 8 +- db/migrations/20231005140947_create_ask.sql | 18 -- itests/dummydeal_test.go | 2 +- itests/framework/framework.go | 236 ++++++++++++------ itests/graphsync_identity_cid_test.go | 15 +- itests/graphsync_retrieval_test.go | 11 +- itests/multiminer_retrieval_graphsync_test.go | 2 +- itests/shared/multiminer.go | 11 +- node/builder.go | 1 - storagemarket/storedask/db.go | 9 - storagemarket/storedask/storedask.go | 6 +- 11 files changed, 201 insertions(+), 118 deletions(-) delete mode 100644 db/migrations/20231005140947_create_ask.sql diff --git a/.circleci/config.yml b/.circleci/config.yml index 800c52572..0a609629b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -319,13 +319,13 @@ workflows: target: "./itests/dummydeal_test.go" - test: - name: test-itest-markets_v1_identity_cid - suite: itest-markets_v1_identity_cid + name: test-graphsync_identity_cid + suite: itest-graphsync_identity_cid target: "./itests/graphsync_identity_cid_test.go" - test: - name: test-itest-markets_v1_retrieval - suite: itest-markets_v1_retrieval + name: test-itest-retrieval + suite: itest-retrieval target: "./itests/graphsync_retrieval_test.go" - test: diff --git a/db/migrations/20231005140947_create_ask.sql b/db/migrations/20231005140947_create_ask.sql deleted file mode 100644 index 5069a4d30..000000000 --- a/db/migrations/20231005140947_create_ask.sql +++ /dev/null @@ -1,18 +0,0 @@ --- +goose Up --- +goose 
StatementBegin -CREATE TABLE IF NOT EXISTS StorageAsk ( - Price INT, - VerifiedPrice INT, - MinPieceSize INT, - MaxPieceSize INT, - Miner Text, - TS INT, - Expiry INT, - SeqNo INT -); --- +goose StatementEnd - --- +goose Down --- +goose StatementBegin -DROP TABLE IF EXISTS StorageAsk; --- +goose StatementEnd \ No newline at end of file diff --git a/itests/dummydeal_test.go b/itests/dummydeal_test.go index 1de5afeb3..2a7c9a692 100644 --- a/itests/dummydeal_test.go +++ b/itests/dummydeal_test.go @@ -94,6 +94,6 @@ func TestDummydealOnline(t *testing.T) { require.NoError(t, err) // rootCid is an identity CID - outFile := f.RetrieveDirect(ctx, t, rootCid, &res.DealParams.ClientDealProposal.Proposal.PieceCID, true, nil) + outFile := f.Retrieve(ctx, t, tempdir, rootCid, res.DealParams.ClientDealProposal.Proposal.PieceCID, true, nil) kit.AssertFilesEqual(t, randomFilepath, outFile) } diff --git a/itests/framework/framework.go b/itests/framework/framework.go index e3c746ac2..a60ff29af 100644 --- a/itests/framework/framework.go +++ b/itests/framework/framework.go @@ -8,17 +8,22 @@ import ( "math/rand" "os" "path" + "strings" "sync" "testing" "time" + "github.com/dustin/go-humanize" "github.com/filecoin-project/boost/api" + clinode "github.com/filecoin-project/boost/cli/node" boostclient "github.com/filecoin-project/boost/client" "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/markets/utils" "github.com/filecoin-project/boost/node" "github.com/filecoin-project/boost/node/config" "github.com/filecoin-project/boost/node/modules/dtypes" "github.com/filecoin-project/boost/node/repo" + rc "github.com/filecoin-project/boost/retrievalmarket/client" "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" "github.com/filecoin-project/boost/storagemarket" "github.com/filecoin-project/boost/storagemarket/types" @@ -50,6 +55,9 @@ import ( "github.com/filecoin-project/lotus/storage/pipeline/sealiface" "github.com/filecoin-project/lotus/storage/sealer/storiface" "github.com/google/uuid" + "github.com/ipfs/boxo/blockservice" + "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/exchange/offline" "github.com/ipfs/boxo/files" dag "github.com/ipfs/boxo/ipld/merkledag" dstest "github.com/ipfs/boxo/ipld/merkledag/test" @@ -57,27 +65,33 @@ import ( blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" + flatfs "github.com/ipfs/go-ds-flatfs" + levelds "github.com/ipfs/go-ds-leveldb" ipldcbor "github.com/ipfs/go-ipld-cbor" ipldformat "github.com/ipfs/go-ipld-format" logging "github.com/ipfs/go-log/v2" "github.com/ipld/go-car" "github.com/ipld/go-ipld-prime" - "github.com/ipld/go-ipld-prime/codec/dagjson" "github.com/ipld/go-ipld-prime/datamodel" - "github.com/ipld/go-ipld-prime/traversal/selector" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/ipld/go-ipld-prime/traversal" "github.com/libp2p/go-libp2p" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/peerstore" "github.com/multiformats/go-multihash" "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" + "golang.org/x/term" + "golang.org/x/xerrors" ) var Log = logging.Logger("boosttest") type TestFrameworkConfig struct { - Ensemble *kit.Ensemble - EnableLegacy bool + Ensemble *kit.Ensemble + EnableLegacy bool + MaxStagingDealsBytes int64 } type TestFramework struct { @@ -109,6 +123,12 @@ func WithEnsemble(e *kit.Ensemble) FrameworkOpts { } } +func 
WithMaxStagingDealsBytes(e int64) FrameworkOpts { + return func(tmc *TestFrameworkConfig) { + tmc.MaxStagingDealsBytes = e + } +} + func NewTestFramework(ctx context.Context, t *testing.T, opts ...FrameworkOpts) *TestFramework { fmc := &TestFrameworkConfig{} for _, opt := range opts { @@ -327,6 +347,9 @@ func (f *TestFramework) Start(opts ...ConfigOpt) error { } cfg.LotusFees.MaxPublishDealsFee = val cfg.Dealmaking.MaxStagingDealsBytes = 4000000 // 4 MB + if f.config.MaxStagingDealsBytes > 4000000 { + cfg.Dealmaking.MaxStagingDealsBytes = f.config.MaxStagingDealsBytes + } cfg.Dealmaking.RemoteCommp = true // No transfers will start until the first stall check period has elapsed cfg.Dealmaking.HttpTransferStallCheckPeriod = config.Duration(100 * time.Millisecond) @@ -741,18 +764,6 @@ func (f *TestFramework) WaitDealSealed(ctx context.Context, deal *cid.Cid) error } } -func (f *TestFramework) Retrieve(ctx context.Context, t *testing.T, deal *cid.Cid, root cid.Cid, extractCar bool, selectorNode datamodel.Node) string { - // perform retrieval. - info, err := f.FullNode.ClientGetDealInfo(ctx, *deal) - require.NoError(t, err) - - offers, err := f.FullNode.ClientFindData(ctx, root, &info.PieceCID) - require.NoError(t, err) - require.NotEmpty(t, offers, "no offers") - - return f.retrieve(ctx, t, offers[0], extractCar, selectorNode) -} - func (f *TestFramework) ExtractFileFromCAR(ctx context.Context, t *testing.T, file *os.File) string { bserv := dstest.Bserv() ch, err := car.LoadCar(ctx, bserv.Blockstore(), file) @@ -788,81 +799,108 @@ func (f *TestFramework) ExtractFileFromCAR(ctx context.Context, t *testing.T, fi return tmpFile } -func (f *TestFramework) RetrieveDirect(ctx context.Context, t *testing.T, root cid.Cid, pieceCid *cid.Cid, extractCar bool, selectorNode datamodel.Node) string { - offer, err := f.FullNode.ClientMinerQueryOffer(ctx, f.MinerAddr, root, pieceCid) +func (f *TestFramework) Retrieve(ctx context.Context, t *testing.T, tempdir string, root cid.Cid, pieceCid cid.Cid, extractCar bool, selectorNode datamodel.Node) string { + clientPath := path.Join(tempdir, "client") + _ = os.Mkdir(clientPath, 0755) + + clientNode, err := clinode.Setup(clientPath) require.NoError(t, err) - return f.retrieve(ctx, t, offer, extractCar, selectorNode) -} + addr, err := clientNode.Wallet.GetDefault() + require.NoError(t, err) -func (f *TestFramework) retrieve(ctx context.Context, t *testing.T, offer lapi.QueryOffer, extractCar bool, selectorNode datamodel.Node) string { - p := path.Join(t.TempDir(), "ret-car-"+t.Name()) - err := os.MkdirAll(path.Dir(p), 0755) + bstoreDatastore, err := flatfs.CreateOrOpen(path.Join(tempdir, "blockstore"), flatfs.NextToLast(3), false) + bstore := blockstore.NewBlockstore(bstoreDatastore, blockstore.NoPrefix()) require.NoError(t, err) - carFile, err := os.Create(p) + + //ds, err := levelds.NewDatastore(path.Join(clientPath, "dstore"), nil) + ds, err := levelds.NewDatastore("", nil) require.NoError(t, err) - defer carFile.Close() //nolint:errcheck + // Create the retrieval client + fc, err := rc.NewClient(clientNode.Host, f.FullNode, clientNode.Wallet, addr, bstore, ds, clientPath) + require.NoError(t, err) - caddr, err := f.FullNode.WalletDefaultAddress(ctx) + baddrs, err := f.Boost.NetAddrsListen(ctx) require.NoError(t, err) - updatesCtx, cancel := context.WithCancel(ctx) - updates, err := f.FullNode.ClientGetRetrievalUpdates(updatesCtx) + query, err := RetrievalQuery(ctx, t, clientNode, &baddrs, pieceCid) require.NoError(t, err) - order := offer.Order(caddr) - if 
selectorNode != nil { - _, err := selector.CompileSelector(selectorNode) - require.NoError(t, err) - jsonSelector, err := ipld.Encode(selectorNode, dagjson.Encode) + proposal, err := rc.RetrievalProposalForAsk(query, root, selectorNode) + require.NoError(t, err) + + // Retrieve the data + _, err = fc.RetrieveContentWithProgressCallback( + ctx, + f.MinerAddr, + proposal, + func(bytesReceived_ uint64) { + printProgress(bytesReceived_, t) + }, + ) + require.NoError(t, err) + + dservOffline := dag.NewDAGService(blockservice.New(bstore, offline.Exchange(bstore))) + + // if we used a selector - need to find the sub-root the user actually wanted to retrieve + if !selectorNode.IsNull() { + var subRootFound bool + err := utils.TraverseDag( + ctx, + dservOffline, + root, + selectorNode, + func(p traversal.Progress, n ipld.Node, r traversal.VisitReason) error { + if r == traversal.VisitReason_SelectionMatch { + + if p.LastBlock.Path.String() != p.Path.String() { + return xerrors.Errorf("unsupported selection path '%s' does not correspond to a node boundary (a.k.a. CID link)", p.Path.String()) + } + + cidLnk, castOK := p.LastBlock.Link.(cidlink.Link) + if !castOK { + return xerrors.Errorf("cidlink cast unexpectedly failed on '%s'", p.LastBlock.Link.String()) + } + + root = cidLnk.Cid + subRootFound = true + } + return nil + }, + ) require.NoError(t, err) - sel := lapi.Selector(jsonSelector) - order.DataSelector = &sel + require.True(t, subRootFound) } - retrievalRes, err := f.FullNode.ClientRetrieve(ctx, order) + dnode, err := dservOffline.Get(ctx, root) require.NoError(t, err) -consumeEvents: - for { - var evt lapi.RetrievalInfo - select { - case <-updatesCtx.Done(): - t.Fatal("Retrieval Timed Out") - case evt = <-updates: - if evt.ID != retrievalRes.DealID { - continue - } - } - switch legacyretrievaltypes.DealStatus(evt.Status) { - case legacyretrievaltypes.DealStatusCompleted: - break consumeEvents - case legacyretrievaltypes.DealStatusRejected: - t.Fatalf("Retrieval Proposal Rejected: %s", evt.Message) - case - legacyretrievaltypes.DealStatusDealNotFound, - legacyretrievaltypes.DealStatusErrored: - t.Fatalf("Retrieval Error: %s", evt.Message) - } - } - cancel() - require.NoError(t, f.FullNode.ClientExport(ctx, - lapi.ExportRef{ - Root: offer.Root, - DealID: retrievalRes.DealID, - }, - lapi.FileRef{ - Path: carFile.Name(), - IsCAR: true, - })) + var out string + + if !extractCar { + // Write file as car file + file, err := os.CreateTemp(path.Join(tempdir, "retrievals"), "*"+root.String()+".car") + require.NoError(t, err) + out = file.Name() + err = car.WriteCar(ctx, dservOffline, []cid.Cid{root}, file) + require.NoError(t, err) + + } else { + // Otherwise write file as UnixFS File + ufsFile, err := unixfile.NewUnixfsFile(ctx, dservOffline, dnode) + require.NoError(t, err) + file, err := os.CreateTemp(path.Join(tempdir, "retrievals"), "*"+root.String()) + err = file.Close() + require.NoError(t, err) + err = os.Remove(file.Name()) + require.NoError(t, err) + err = files.WriteTo(ufsFile, file.Name()) + require.NoError(t, err) - ret := carFile.Name() - if extractCar { - ret = f.ExtractFileFromCAR(ctx, t, carFile) } - return ret + return out } type RetrievalInfo struct { @@ -902,3 +940,57 @@ type DataTransferChannel struct { Transferred uint64 Stages *datatransfer.ChannelStages } + +func printProgress(bytesReceived uint64, t *testing.T) { + str := fmt.Sprintf("%v (%v)", bytesReceived, humanize.IBytes(bytesReceived)) + + termWidth, _, err := term.GetSize(int(os.Stdin.Fd())) + strLen := len(str) + if err 
== nil { + + if strLen < termWidth { + // If the string is shorter than the terminal width, pad right side + // with spaces to remove old text + str = strings.Join([]string{str, strings.Repeat(" ", termWidth-strLen)}, "") + } else if strLen > termWidth { + // If the string doesn't fit in the terminal, cut it down to a size + // that fits + str = str[:termWidth] + } + } + + t.Logf("%s\r", str) +} + +func RetrievalQuery(ctx context.Context, t *testing.T, client *clinode.Node, peerAddr *peer.AddrInfo, pcid cid.Cid) (*legacyretrievaltypes.QueryResponse, error) { + client.Host.Peerstore().AddAddrs(peerAddr.ID, peerAddr.Addrs, peerstore.TempAddrTTL) + s, err := client.Host.NewStream(ctx, peerAddr.ID, rc.RetrievalQueryProtocol) + require.NoError(t, err) + + client.Host.ConnManager().Protect(s.Conn().RemotePeer(), "RetrievalQuery") + defer func() { + client.Host.ConnManager().Unprotect(s.Conn().RemotePeer(), "RetrievalQuery") + s.Close() + }() + + // We have connected + + q := &legacyretrievaltypes.Query{ + PayloadCID: pcid, + } + + var resp legacyretrievaltypes.QueryResponse + dline, ok := ctx.Deadline() + if ok { + _ = s.SetDeadline(dline) + defer func() { _ = s.SetDeadline(time.Time{}) }() + } + + err = cborutil.WriteCborRPC(s, q) + require.NoError(t, err) + + err = cborutil.ReadCborRPC(s, &resp) + require.NoError(t, err) + + return &resp, nil +} diff --git a/itests/graphsync_identity_cid_test.go b/itests/graphsync_identity_cid_test.go index 13cd37e58..88c0a32f5 100644 --- a/itests/graphsync_identity_cid_test.go +++ b/itests/graphsync_identity_cid_test.go @@ -13,6 +13,7 @@ import ( gstestutil "github.com/filecoin-project/boost-graphsync/testutil" "github.com/filecoin-project/boost/itests/framework" "github.com/filecoin-project/boost/testutil" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/itests/kit" "github.com/google/uuid" "github.com/ipfs/go-cid" @@ -28,7 +29,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestMarketsV1DealAndRetrievalWithIdentityCID(t *testing.T) { +func TestDealAndRetrievalWithIdentityCID(t *testing.T) { req := require.New(t) ctx := context.Background() log := framework.Log @@ -37,11 +38,15 @@ func TestMarketsV1DealAndRetrievalWithIdentityCID(t *testing.T) { framework.SetLogLevel() var opts []framework.FrameworkOpts opts = append(opts, framework.EnableLegacyDeals(true)) + opts = append(opts, framework.WithMaxStagingDealsBytes(10000000)) f := framework.NewTestFramework(ctx, t, opts...) 
err := f.Start() req.NoError(err) defer f.Stop() + err = f.AddClientProviderBalance(abi.NewTokenAmount(1e15)) + require.NoError(t, err) + // Create a CAR file tempdir := t.TempDir() log.Debugw("using tempdir", "dir", tempdir) @@ -105,13 +110,17 @@ func TestMarketsV1DealAndRetrievalWithIdentityCID(t *testing.T) { log.Debugw("got response from MarketDummyDeal", "res", spew.Sdump(res)) dealCid, err := res.DealParams.ClientDealProposal.Proposal.Cid() require.NoError(t, err) + pieceCid := res.DealParams.ClientDealProposal.Proposal.PieceCID + log.Infof("deal ID is : %s", dealCid.String()) + // Wait for the first deal to be added to a sector and cleaned up so space is made + err = f.WaitForDealAddedToSector(dealUuid) - time.Sleep(2 * time.Second) + time.Sleep(5 * time.Second) // Deal is stored and sealed, attempt different retrieval forms log.Debugw("deal is sealed, starting retrieval", "cid", dealCid.String(), "root", root.String()) - outPath := f.Retrieve(ctx, t, &dealCid, root, false, selectorparse.CommonSelector_ExploreAllRecursively) + outPath := f.Retrieve(ctx, t, tempdir, root, pieceCid, false, selectorparse.CommonSelector_ExploreAllRecursively) // Inspect what we got gotCids, err := testutil.CidsInCar(outPath) diff --git a/itests/graphsync_retrieval_test.go b/itests/graphsync_retrieval_test.go index d09b096a9..8e1810687 100644 --- a/itests/graphsync_retrieval_test.go +++ b/itests/graphsync_retrieval_test.go @@ -10,6 +10,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/filecoin-project/boost/itests/framework" "github.com/filecoin-project/boost/testutil" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/itests/kit" "github.com/google/uuid" "github.com/ipfs/go-cid" @@ -20,7 +21,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestMarketsV1DealRetrieval(t *testing.T) { +func TestDealRetrieval(t *testing.T) { ctx := context.Background() log := framework.Log @@ -28,11 +29,15 @@ func TestMarketsV1DealRetrieval(t *testing.T) { framework.SetLogLevel() var opts []framework.FrameworkOpts opts = append(opts, framework.EnableLegacyDeals(true)) + opts = append(opts, framework.WithMaxStagingDealsBytes(10000000)) f := framework.NewTestFramework(ctx, t, opts...) 
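// For scale, a hedged aside on the constants used in these test setups:
// abi.NewTokenAmount is denominated in attoFIL, so the 1e15 attoFIL added via
// AddClientProviderBalance is 0.001 FIL (1 FIL = 1e18 attoFIL), and
// WithMaxStagingDealsBytes(10000000) allows roughly 10 MB of staged deal data.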
err := f.Start() require.NoError(t, err) defer f.Stop() + err = f.AddClientProviderBalance(abi.NewTokenAmount(1e15)) + require.NoError(t, err) + // Create a CAR file tempdir := t.TempDir() log.Debugw("using tempdir", "dir", tempdir) @@ -103,7 +108,7 @@ func TestMarketsV1DealRetrieval(t *testing.T) { log.Debugw("got deal proposal cid", "cid", dealCid.String()) - err = f.WaitDealSealed(ctx, &dealCid) + err = f.WaitForDealAddedToSector(res.DealParams.DealUUID) require.NoError(t, err) // Deal is stored and sealed, attempt different retrieval forms @@ -178,7 +183,7 @@ func TestMarketsV1DealRetrieval(t *testing.T) { } log.Debugw("deal is sealed, starting retrieval", "cid", dealCid.String(), "root", root) - outPath := f.Retrieve(ctx, t, &dealCid, root, false, selNode) + outPath := f.Retrieve(ctx, t, tempdir, root, res.DealParams.ClientDealProposal.Proposal.PieceCID, false, selNode) // Inspect what we got gotCids, err := testutil.CidsInCar(outPath) diff --git a/itests/multiminer_retrieval_graphsync_test.go b/itests/multiminer_retrieval_graphsync_test.go index 1ef4b52ac..c79b9f8be 100644 --- a/itests/multiminer_retrieval_graphsync_test.go +++ b/itests/multiminer_retrieval_graphsync_test.go @@ -17,7 +17,7 @@ func TestMultiMinerRetrievalGraphsync(t *testing.T) { // - recognize that the deal is for a sector on the first miner // - read the data for the deal from the first miner t.Logf("deal is added to piece, starting retrieval of root %s", rt.RootCid) - outPath := rt.BoostAndMiner2.RetrieveDirect(ctx, t, rt.RootCid, nil, true, nil) + outPath := rt.BoostAndMiner2.Retrieve(ctx, t, rt.TempDir, rt.RootCid, rt.PieceCid, true, nil) t.Logf("retrieval is done, compare in- and out- files in: %s, out: %s", rt.SampleFilePath, outPath) kit.AssertFilesEqual(t, rt.SampleFilePath, outPath) diff --git a/itests/shared/multiminer.go b/itests/shared/multiminer.go index f66b4a53b..18479eba9 100644 --- a/itests/shared/multiminer.go +++ b/itests/shared/multiminer.go @@ -2,6 +2,10 @@ package shared import ( "context" + "path/filepath" + "testing" + "time" + "github.com/davecgh/go-spew/spew" "github.com/filecoin-project/boost/itests/framework" "github.com/filecoin-project/boost/node/config" @@ -11,9 +15,6 @@ import ( "github.com/google/uuid" "github.com/ipfs/go-cid" "github.com/stretchr/testify/require" - "path/filepath" - "testing" - "time" ) type RetrievalTest struct { @@ -21,6 +22,8 @@ type RetrievalTest struct { BoostAndMiner2 *framework.TestFramework SampleFilePath string RootCid cid.Cid + PieceCid cid.Cid + TempDir string } func RunMultiminerRetrievalTest(t *testing.T, rt func(ctx context.Context, t *testing.T, rt *RetrievalTest)) { @@ -107,5 +110,7 @@ func RunMultiminerRetrievalTest(t *testing.T, rt func(ctx context.Context, t *te BoostAndMiner2: boostAndMiner2, SampleFilePath: randomFilepath, RootCid: rootCid, + PieceCid: res.DealParams.ClientDealProposal.Proposal.PieceCID, + TempDir: tempdir, }) } diff --git a/node/builder.go b/node/builder.go index 9325283e1..ce894fb69 100644 --- a/node/builder.go +++ b/node/builder.go @@ -150,7 +150,6 @@ const ( HandleMigrateProviderFundsKey HandleDealsKey HandleCreateRetrievalTablesKey - HandleCreateAskTablesKey HandleSetShardSelector HandleSetRetrievalAskGetter HandleRetrievalEventsKey diff --git a/storagemarket/storedask/db.go b/storagemarket/storedask/db.go index bcabc3f4d..ec9356e3a 100644 --- a/storagemarket/storedask/db.go +++ b/storagemarket/storedask/db.go @@ -13,7 +13,6 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" 
lotus_repo "github.com/filecoin-project/lotus/node/repo" - "go.uber.org/fx" ) const AskDBName = "ask.db" @@ -41,14 +40,6 @@ func NewStorageAskDB(r lotus_repo.LockedRepo) (*StorageAskDB, error) { return &StorageAskDB{db: d}, nil } -func CreateAskTables(lc fx.Lifecycle, db *StorageAskDB) { - lc.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { - return createAskTable(ctx, db.db) - }, - }) -} - func (s *StorageAskDB) Update(ctx context.Context, ask legacytypes.StorageAsk) error { var minerString string qry := "SELECT Miner FROM StorageAsk WHERE Miner=?;" diff --git a/storagemarket/storedask/storedask.go b/storagemarket/storedask/storedask.go index 54ba345f1..e547f830c 100644 --- a/storagemarket/storedask/storedask.go +++ b/storagemarket/storedask/storedask.go @@ -24,10 +24,10 @@ import ( var log = logging.Logger("storedask") // DefaultPrice is the default price for unverified deals (in attoFil / GiB / Epoch) -var DefaultPrice = abi.NewTokenAmount(500000000) +var DefaultPrice = abi.NewTokenAmount(50000000) // DefaultVerifiedPrice is the default price for verified deals (in attoFil / GiB / Epoch) -var DefaultVerifiedPrice = abi.NewTokenAmount(50000000) +var DefaultVerifiedPrice = abi.NewTokenAmount(5000000) // DefaultDuration is the default number of epochs a storage ask is in effect for const DefaultDuration abi.ChainEpoch = 1000000 @@ -37,7 +37,7 @@ const DefaultMinPieceSize abi.PaddedPieceSize = 256 // DefaultMaxPieceSize is the default maximum accepted size for pieces for deals // TODO: It would be nice to default this to the miner's sector size -const DefaultMaxPieceSize abi.PaddedPieceSize = 1 << 20 +const DefaultMaxPieceSize abi.PaddedPieceSize = 32 << 30 type StoredAsk interface { GetAsk(miner address.Address) *legacytypes.SignedStorageAsk From 55bb869031a4707fb6ea135f73b8e3bbb352f1ba Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Tue, 24 Oct 2023 18:23:58 +0400 Subject: [PATCH 10/34] clean up mocks --- datatransfer/transport/graphsync/graphsync.go | 140 +++++++++--------- datatransfer/types_cbor_gen.go | 6 +- go.mod | 4 +- retrievalmarket/mock/gen.go | 2 +- retrievalmarket/mock/piecestore.go | 14 +- retrievalmarket/server/gsunpaidretrieval.go | 78 ++++++---- retrievalmarket/types/voucher_legs.go | 38 ++--- 7 files changed, 146 insertions(+), 136 deletions(-) diff --git a/datatransfer/transport/graphsync/graphsync.go b/datatransfer/transport/graphsync/graphsync.go index e2de26926..3009b7941 100644 --- a/datatransfer/transport/graphsync/graphsync.go +++ b/datatransfer/transport/graphsync/graphsync.go @@ -9,7 +9,7 @@ import ( graphsync "github.com/filecoin-project/boost-graphsync" "github.com/filecoin-project/boost-graphsync/donotsendfirstblocks" - datatransfer2 "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/datatransfer" "github.com/filecoin-project/boost/datatransfer/transport/graphsync/extension" logging "github.com/ipfs/go-log/v2" "github.com/ipld/go-ipld-prime" @@ -50,14 +50,14 @@ func SupportedExtensions(supportedExtensions []graphsync.ExtensionName) Option { } // RegisterCompletedRequestListener is used by the tests -func RegisterCompletedRequestListener(l func(channelID datatransfer2.ChannelID)) Option { +func RegisterCompletedRequestListener(l func(channelID datatransfer.ChannelID)) Option { return func(t *Transport) { t.completedRequestListener = l } } // RegisterCompletedResponseListener is used by the tests -func RegisterCompletedResponseListener(l func(channelID datatransfer2.ChannelID)) Option { +func 
RegisterCompletedResponseListener(l func(channelID datatransfer.ChannelID)) Option { return func(t *Transport) { t.completedResponseListener = l } @@ -66,18 +66,18 @@ func RegisterCompletedResponseListener(l func(channelID datatransfer2.ChannelID) // Transport manages graphsync hooks for data transfer, translating from // graphsync hooks to semantic data transfer events type Transport struct { - events datatransfer2.EventsHandler + events datatransfer.EventsHandler gs graphsync.GraphExchange peerID peer.ID supportedExtensions []graphsync.ExtensionName unregisterFuncs []graphsync.UnregisterHookFunc - completedRequestListener func(channelID datatransfer2.ChannelID) - completedResponseListener func(channelID datatransfer2.ChannelID) + completedRequestListener func(channelID datatransfer.ChannelID) + completedResponseListener func(channelID datatransfer.ChannelID) // Map from data transfer channel ID to information about that channel dtChannelsLk sync.RWMutex - dtChannels map[datatransfer2.ChannelID]*dtChannel + dtChannels map[datatransfer.ChannelID]*dtChannel // Used in graphsync callbacks to map from graphsync request to the // associated data-transfer channel ID. @@ -90,7 +90,7 @@ func NewTransport(peerID peer.ID, gs graphsync.GraphExchange, options ...Option) gs: gs, peerID: peerID, supportedExtensions: defaultSupportedExtensions, - dtChannels: make(map[datatransfer2.ChannelID]*dtChannel), + dtChannels: make(map[datatransfer.ChannelID]*dtChannel), requestIDToChannelID: newRequestIDToChannelIDMap(), } for _, option := range options { @@ -107,14 +107,14 @@ func NewTransport(peerID peer.ID, gs graphsync.GraphExchange, options ...Option) func (t *Transport) OpenChannel( ctx context.Context, dataSender peer.ID, - channelID datatransfer2.ChannelID, + channelID datatransfer.ChannelID, root ipld.Link, stor ipld.Node, - channel datatransfer2.ChannelState, - msg datatransfer2.Message, + channel datatransfer.ChannelState, + msg datatransfer.Message, ) error { if t.events == nil { - return datatransfer2.ErrHandlerNotSet + return datatransfer.ErrHandlerNotSet } exts, err := extension.ToExtensionData(msg, t.supportedExtensions) @@ -147,7 +147,7 @@ func (t *Transport) OpenChannel( // Get the extension data for sending a Restart message, depending on the // protocol version of the peer -func (t *Transport) getRestartExtension(ctx context.Context, p peer.ID, channel datatransfer2.ChannelState) ([]graphsync.ExtensionData, error) { +func (t *Transport) getRestartExtension(ctx context.Context, p peer.ID, channel datatransfer.ChannelState) ([]graphsync.ExtensionData, error) { if channel == nil { return nil, nil } @@ -155,7 +155,7 @@ func (t *Transport) getRestartExtension(ctx context.Context, p peer.ID, channel } // Skip the first N blocks because they were already received -func getDoNotSendFirstBlocksExtension(channel datatransfer2.ChannelState) ([]graphsync.ExtensionData, error) { +func getDoNotSendFirstBlocksExtension(channel datatransfer.ChannelState) ([]graphsync.ExtensionData, error) { skipBlockCount := channel.ReceivedCidsTotal() data := donotsendfirstblocks.EncodeDoNotSendFirstBlocks(skipBlockCount) return []graphsync.ExtensionData{{ @@ -232,7 +232,7 @@ func (t *Transport) executeGsRequest(req *gsReq) { } // PauseChannel pauses the given data-transfer channel -func (t *Transport) PauseChannel(ctx context.Context, chid datatransfer2.ChannelID) error { +func (t *Transport) PauseChannel(ctx context.Context, chid datatransfer.ChannelID) error { ch, err := t.getDTChannel(chid) if err != nil { return err @@ 
-244,8 +244,8 @@ func (t *Transport) PauseChannel(ctx context.Context, chid datatransfer2.Channel // if there is one func (t *Transport) ResumeChannel( ctx context.Context, - msg datatransfer2.Message, - chid datatransfer2.ChannelID, + msg datatransfer.Message, + chid datatransfer.ChannelID, ) error { ch, err := t.getDTChannel(chid) if err != nil { @@ -255,7 +255,7 @@ func (t *Transport) ResumeChannel( } // CloseChannel closes the given data-transfer channel -func (t *Transport) CloseChannel(ctx context.Context, chid datatransfer2.ChannelID) error { +func (t *Transport) CloseChannel(ctx context.Context, chid datatransfer.ChannelID) error { ch, err := t.getDTChannel(chid) if err != nil { return err @@ -270,7 +270,7 @@ func (t *Transport) CloseChannel(ctx context.Context, chid datatransfer2.Channel // CleanupChannel is called on the otherside of a cancel - removes any associated // data for the channel -func (t *Transport) CleanupChannel(chid datatransfer2.ChannelID) { +func (t *Transport) CleanupChannel(chid datatransfer.ChannelID) { t.dtChannelsLk.Lock() ch, ok := t.dtChannels[chid] @@ -288,9 +288,9 @@ func (t *Transport) CleanupChannel(chid datatransfer2.ChannelID) { } // SetEventHandler sets the handler for events on channels -func (t *Transport) SetEventHandler(events datatransfer2.EventsHandler) error { +func (t *Transport) SetEventHandler(events datatransfer.EventsHandler) error { if t.events != nil { - return datatransfer2.ErrHandlerAlreadySet + return datatransfer.ErrHandlerAlreadySet } t.events = events @@ -334,7 +334,7 @@ func (t *Transport) Shutdown(ctx context.Context) error { } // UseStore tells the graphsync transport to use the given loader and storer for this channelID -func (t *Transport) UseStore(channelID datatransfer2.ChannelID, lsys ipld.LinkSystem) error { +func (t *Transport) UseStore(channelID datatransfer.ChannelID, lsys ipld.LinkSystem) error { ch := t.trackDTChannel(channelID) return ch.useStore(lsys) } @@ -352,8 +352,8 @@ type ChannelGraphsyncRequests struct { // ChannelsForPeer describes current active channels for a given peer and their // associated graphsync requests type ChannelsForPeer struct { - SendingChannels map[datatransfer2.ChannelID]ChannelGraphsyncRequests - ReceivingChannels map[datatransfer2.ChannelID]ChannelGraphsyncRequests + SendingChannels map[datatransfer.ChannelID]ChannelGraphsyncRequests + ReceivingChannels map[datatransfer.ChannelID]ChannelGraphsyncRequests } // ChannelsForPeer identifies which channels are open and which request IDs they map to @@ -364,15 +364,15 @@ func (t *Transport) ChannelsForPeer(p peer.ID) ChannelsForPeer { // cannot have active transfers with self if p == t.peerID { return ChannelsForPeer{ - SendingChannels: map[datatransfer2.ChannelID]ChannelGraphsyncRequests{}, - ReceivingChannels: map[datatransfer2.ChannelID]ChannelGraphsyncRequests{}, + SendingChannels: map[datatransfer.ChannelID]ChannelGraphsyncRequests{}, + ReceivingChannels: map[datatransfer.ChannelID]ChannelGraphsyncRequests{}, } } - sending := make(map[datatransfer2.ChannelID]ChannelGraphsyncRequests) - receiving := make(map[datatransfer2.ChannelID]ChannelGraphsyncRequests) + sending := make(map[datatransfer.ChannelID]ChannelGraphsyncRequests) + receiving := make(map[datatransfer.ChannelID]ChannelGraphsyncRequests) // loop through every graphsync request key we're currently tracking - t.requestIDToChannelID.forEach(func(requestID graphsync.RequestID, isSending bool, chid datatransfer2.ChannelID) { + t.requestIDToChannelID.forEach(func(requestID 
graphsync.RequestID, isSending bool, chid datatransfer.ChannelID) { // if the associated channel ID includes the requested peer if chid.Initiator == p || chid.Responder == p { // determine whether the requested peer is one at least one end of the channel @@ -427,7 +427,7 @@ func (t *Transport) gsOutgoingRequestHook(p peer.ID, request graphsync.RequestDa initiator = p responder = t.peerID } - chid := datatransfer2.ChannelID{Initiator: initiator, Responder: responder, ID: message.TransferID()} + chid := datatransfer.ChannelID{Initiator: initiator, Responder: responder, ID: message.TransferID()} // A data transfer channel was opened err := t.events.OnChannelOpened(chid) @@ -453,12 +453,12 @@ func (t *Transport) gsIncomingBlockHook(p peer.ID, response graphsync.ResponseDa } err := t.events.OnDataReceived(chid, block.Link(), block.BlockSize(), block.Index(), block.BlockSizeOnWire() != 0) - if err != nil && err != datatransfer2.ErrPause { + if err != nil && err != datatransfer.ErrPause { hookActions.TerminateWithError(err) return } - if err == datatransfer2.ErrPause { + if err == datatransfer.ErrPause { hookActions.PauseRequest() } } @@ -503,12 +503,12 @@ func (t *Transport) gsOutgoingBlockHook(p peer.ID, request graphsync.RequestData // required) and it can return a message that will be sent with the block // (eg to ask for payment). msg, err := t.events.OnDataQueued(chid, block.Link(), block.BlockSize(), block.Index(), block.BlockSizeOnWire() != 0) - if err != nil && err != datatransfer2.ErrPause { + if err != nil && err != datatransfer.ErrPause { hookActions.TerminateWithError(err) return } - if err == datatransfer2.ErrPause { + if err == datatransfer.ErrPause { hookActions.PauseResponse() } @@ -537,12 +537,12 @@ func (t *Transport) gsReqQueuedHook(p peer.ID, request graphsync.RequestData, ho return } - var chid datatransfer2.ChannelID + var chid datatransfer.ChannelID if msg.IsRequest() { // when a data transfer request comes in on graphsync, the remote peer // initiated a pull - chid = datatransfer2.ChannelID{ID: msg.TransferID(), Initiator: p, Responder: t.peerID} - dtRequest := msg.(datatransfer2.Request) + chid = datatransfer.ChannelID{ID: msg.TransferID(), Initiator: p, Responder: t.peerID} + dtRequest := msg.(datatransfer.Request) if dtRequest.IsNew() { log.Infof("%s, pull request queued, req_id=%d", chid, request.ID()) t.events.OnTransferQueued(chid) @@ -553,8 +553,8 @@ func (t *Transport) gsReqQueuedHook(p peer.ID, request graphsync.RequestData, ho // when a data transfer response comes in on graphsync, this node // initiated a push, and the remote peer responded with a request // for data - chid = datatransfer2.ChannelID{ID: msg.TransferID(), Initiator: t.peerID, Responder: p} - response := msg.(datatransfer2.Response) + chid = datatransfer.ChannelID{ID: msg.TransferID(), Initiator: t.peerID, Responder: p} + response := msg.(datatransfer.Response) if response.IsNew() { log.Infof("%s, GS pull request queued in response to our push, req_id=%d", chid, request.ID()) t.events.OnTransferQueued(chid) @@ -588,13 +588,13 @@ func (t *Transport) gsReqRecdHook(p peer.ID, request graphsync.RequestData, hook // - The local node opened a data-transfer push channel, and in response // the remote peer sent a graphsync request for the data, and now the // local node receives that request for data - var chid datatransfer2.ChannelID - var responseMessage datatransfer2.Message + var chid datatransfer.ChannelID + var responseMessage datatransfer.Message var ch *dtChannel if msg.IsRequest() { // when a 
data transfer request comes in on graphsync, the remote peer // initiated a pull - chid = datatransfer2.ChannelID{ID: msg.TransferID(), Initiator: p, Responder: t.peerID} + chid = datatransfer.ChannelID{ID: msg.TransferID(), Initiator: p, Responder: t.peerID} log.Debugf("%s: received request for data (pull), req_id=%d", chid, request.ID()) @@ -603,13 +603,13 @@ func (t *Transport) gsReqRecdHook(p peer.ID, request graphsync.RequestData, hook ch.lk.Lock() defer ch.lk.Unlock() - request := msg.(datatransfer2.Request) + request := msg.(datatransfer.Request) responseMessage, err = t.events.OnRequestReceived(chid, request) } else { // when a data transfer response comes in on graphsync, this node // initiated a push, and the remote peer responded with a request // for data - chid = datatransfer2.ChannelID{ID: msg.TransferID(), Initiator: t.peerID, Responder: p} + chid = datatransfer.ChannelID{ID: msg.TransferID(), Initiator: t.peerID, Responder: p} log.Debugf("%s: received request for data (push), req_id=%d", chid, request.ID()) @@ -618,7 +618,7 @@ func (t *Transport) gsReqRecdHook(p peer.ID, request graphsync.RequestData, hook ch.lk.Lock() defer ch.lk.Unlock() - response := msg.(datatransfer2.Response) + response := msg.(datatransfer.Response) err = t.events.OnResponseReceived(chid, response) } @@ -637,7 +637,7 @@ func (t *Transport) gsReqRecdHook(p peer.ID, request graphsync.RequestData, hook } } - if err != nil && err != datatransfer2.ErrPause { + if err != nil && err != datatransfer.ErrPause { hookActions.TerminateWithError(err) return } @@ -645,7 +645,7 @@ func (t *Transport) gsReqRecdHook(p peer.ID, request graphsync.RequestData, hook // Check if the callback indicated that the channel should be paused // immediately (eg because data is still being unsealed) paused := false - if err == datatransfer2.ErrPause { + if err == datatransfer.ErrPause { log.Debugf("%s: pausing graphsync response", chid) paused = true @@ -746,7 +746,7 @@ func (t *Transport) gsRequestUpdatedHook(p peer.ID, request graphsync.RequestDat } } - if err != nil && err != datatransfer2.ErrPause { + if err != nil && err != datatransfer.ErrPause { hookActions.TerminateWithError(err) } @@ -786,7 +786,7 @@ func (t *Transport) gsIncomingResponseHook(p peer.ID, response graphsync.Respons } } -func (t *Transport) processExtension(chid datatransfer2.ChannelID, gsMsg extension.GsExtended, p peer.ID, exts []graphsync.ExtensionName) (datatransfer2.Message, error) { +func (t *Transport) processExtension(chid datatransfer.ChannelID, gsMsg extension.GsExtended, p peer.ID, exts []graphsync.ExtensionName) (datatransfer.Message, error) { // if this is a push request the sender is us. 
msg, err := extension.GetTransferData(gsMsg, exts) @@ -802,19 +802,19 @@ func (t *Transport) processExtension(chid datatransfer2.ChannelID, gsMsg extensi if msg.IsRequest() { // only accept request message updates when original message was also request - if (chid != datatransfer2.ChannelID{ID: msg.TransferID(), Initiator: p, Responder: t.peerID}) { + if (chid != datatransfer.ChannelID{ID: msg.TransferID(), Initiator: p, Responder: t.peerID}) { return nil, errors.New("received request on response channel") } - dtRequest := msg.(datatransfer2.Request) + dtRequest := msg.(datatransfer.Request) return t.events.OnRequestReceived(chid, dtRequest) } // only accept response message updates when original message was also response - if (chid != datatransfer2.ChannelID{ID: msg.TransferID(), Initiator: t.peerID, Responder: p}) { + if (chid != datatransfer.ChannelID{ID: msg.TransferID(), Initiator: t.peerID, Responder: p}) { return nil, errors.New("received response on request channel") } - dtResponse := msg.(datatransfer2.Response) + dtResponse := msg.(datatransfer.Response) return nil, t.events.OnResponseReceived(chid, dtResponse) } @@ -826,7 +826,7 @@ func (t *Transport) gsRequestorCancelledListener(p peer.ID, request graphsync.Re ch, err := t.getDTChannel(chid) if err != nil { - if !errors.Is(datatransfer2.ErrChannelNotFound, err) { + if !errors.Is(datatransfer.ErrChannelNotFound, err) { log.Errorf("requestor cancelled: getting channel %s: %s", chid, err) } return @@ -854,7 +854,7 @@ func (t *Transport) gsNetworkSendErrorListener(p peer.ID, request graphsync.Requ func (t *Transport) gsNetworkReceiveErrorListener(p peer.ID, gserr error) { // Fire a receive data error on all ongoing graphsync transfers with that // peer - t.requestIDToChannelID.forEach(func(k graphsync.RequestID, sending bool, chid datatransfer2.ChannelID) { + t.requestIDToChannelID.forEach(func(k graphsync.RequestID, sending bool, chid datatransfer.ChannelID) { if chid.Initiator != p && chid.Responder != p { return } @@ -866,7 +866,7 @@ func (t *Transport) gsNetworkReceiveErrorListener(p peer.ID, gserr error) { }) } -func (t *Transport) newDTChannel(chid datatransfer2.ChannelID) *dtChannel { +func (t *Transport) newDTChannel(chid datatransfer.ChannelID) *dtChannel { return &dtChannel{ t: t, channelID: chid, @@ -874,7 +874,7 @@ func (t *Transport) newDTChannel(chid datatransfer2.ChannelID) *dtChannel { } } -func (t *Transport) trackDTChannel(chid datatransfer2.ChannelID) *dtChannel { +func (t *Transport) trackDTChannel(chid datatransfer.ChannelID) *dtChannel { t.dtChannelsLk.Lock() defer t.dtChannelsLk.Unlock() @@ -887,9 +887,9 @@ func (t *Transport) trackDTChannel(chid datatransfer2.ChannelID) *dtChannel { return ch } -func (t *Transport) getDTChannel(chid datatransfer2.ChannelID) (*dtChannel, error) { +func (t *Transport) getDTChannel(chid datatransfer.ChannelID) (*dtChannel, error) { if t.events == nil { - return nil, datatransfer2.ErrHandlerNotSet + return nil, datatransfer.ErrHandlerNotSet } t.dtChannelsLk.RLock() @@ -897,14 +897,14 @@ func (t *Transport) getDTChannel(chid datatransfer2.ChannelID) (*dtChannel, erro ch, ok := t.dtChannels[chid] if !ok { - return nil, fmt.Errorf("channel %s: %w", chid, datatransfer2.ErrChannelNotFound) + return nil, fmt.Errorf("channel %s: %w", chid, datatransfer.ErrChannelNotFound) } return ch, nil } // Info needed to keep track of a data transfer channel type dtChannel struct { - channelID datatransfer2.ChannelID + channelID datatransfer.ChannelID t *Transport lk sync.RWMutex @@ -923,7 +923,7 @@ 
type dtChannel struct { // Info needed to monitor an ongoing graphsync request type gsReq struct { - channelID datatransfer2.ChannelID + channelID datatransfer.ChannelID responseChan <-chan graphsync.ResponseProgress errChan <-chan error onComplete func() @@ -932,11 +932,11 @@ type gsReq struct { // Open a graphsync request for data to the remote peer func (c *dtChannel) open( ctx context.Context, - chid datatransfer2.ChannelID, + chid datatransfer.ChannelID, dataSender peer.ID, root ipld.Link, stor ipld.Node, - channel datatransfer2.ChannelState, + channel datatransfer.ChannelState, exts []graphsync.ExtensionData, ) (*gsReq, error) { c.lk.Lock() @@ -1085,7 +1085,7 @@ func (c *dtChannel) pause(ctx context.Context) error { return c.t.gs.Pause(ctx, *c.requestID) } -func (c *dtChannel) resume(ctx context.Context, msg datatransfer2.Message) error { +func (c *dtChannel) resume(ctx context.Context, msg datatransfer.Message) error { c.lk.Lock() defer c.lk.Unlock() @@ -1240,7 +1240,7 @@ func (c *dtChannel) cancel(ctx context.Context) chan error { type channelInfo struct { sending bool - channelID datatransfer2.ChannelID + channelID datatransfer.ChannelID } // Used in graphsync callbacks to map from graphsync request to the @@ -1257,7 +1257,7 @@ func newRequestIDToChannelIDMap() *requestIDToChannelIDMap { } // get the value for a key -func (m *requestIDToChannelIDMap) load(key graphsync.RequestID) (datatransfer2.ChannelID, bool) { +func (m *requestIDToChannelIDMap) load(key graphsync.RequestID) (datatransfer.ChannelID, bool) { m.lk.RLock() defer m.lk.RUnlock() @@ -1266,7 +1266,7 @@ func (m *requestIDToChannelIDMap) load(key graphsync.RequestID) (datatransfer2.C } // set the value for a key -func (m *requestIDToChannelIDMap) set(key graphsync.RequestID, sending bool, chid datatransfer2.ChannelID) { +func (m *requestIDToChannelIDMap) set(key graphsync.RequestID, sending bool, chid datatransfer.ChannelID) { m.lk.Lock() defer m.lk.Unlock() @@ -1274,7 +1274,7 @@ func (m *requestIDToChannelIDMap) set(key graphsync.RequestID, sending bool, chi } // call f for each key / value in the map -func (m *requestIDToChannelIDMap) forEach(f func(k graphsync.RequestID, isSending bool, chid datatransfer2.ChannelID)) { +func (m *requestIDToChannelIDMap) forEach(f func(k graphsync.RequestID, isSending bool, chid datatransfer.ChannelID)) { m.lk.RLock() defer m.lk.RUnlock() @@ -1284,7 +1284,7 @@ func (m *requestIDToChannelIDMap) forEach(f func(k graphsync.RequestID, isSendin } // delete any keys that reference this value -func (m *requestIDToChannelIDMap) deleteRefs(id datatransfer2.ChannelID) { +func (m *requestIDToChannelIDMap) deleteRefs(id datatransfer.ChannelID) { m.lk.Lock() defer m.lk.Unlock() diff --git a/datatransfer/types_cbor_gen.go b/datatransfer/types_cbor_gen.go index 389c214e1..f28216574 100644 --- a/datatransfer/types_cbor_gen.go +++ b/datatransfer/types_cbor_gen.go @@ -8,10 +8,10 @@ import ( "math" "sort" - "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p/core/peer" + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p/core/peer" cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" + xerrors "golang.org/x/xerrors" ) var _ = xerrors.Errorf diff --git a/go.mod b/go.mod index 9893dba54..f4514f529 100644 --- a/go.mod +++ b/go.mod @@ -8,8 +8,6 @@ replace github.com/filecoin-project/boostd-data => ./extern/boostd-data // replace github.com/filecoin-project/boost-graphsync => ../boost-graphsync -// replace github.com/filecoin-project/go-data-transfer => ../go-data-transfer - // 
replace github.com/filecoin-project/boost-gfm => ../boost-gfm require ( @@ -327,7 +325,6 @@ require ( require ( github.com/filecoin-project/boost-gfm v1.26.7 github.com/filecoin-project/boost-graphsync v0.13.9 - github.com/filecoin-project/go-data-transfer/v2 v2.0.0-rc7 github.com/filecoin-project/go-fil-markets v1.28.3 github.com/filecoin-project/lotus v1.23.4-rc1 github.com/ipfs/boxo v0.12.0 @@ -344,6 +341,7 @@ require ( require ( github.com/Jorropo/jsync v1.0.1 // indirect + github.com/filecoin-project/go-data-transfer/v2 v2.0.0-rc7 // indirect github.com/filecoin-project/kubo-api-client v0.0.2-0.20230829103503-14448166d14d // indirect github.com/gammazero/channelqueue v0.2.1 // indirect github.com/gammazero/deque v0.2.1 // indirect diff --git a/retrievalmarket/mock/gen.go b/retrievalmarket/mock/gen.go index 8b5d222bc..f99d4ba80 100644 --- a/retrievalmarket/mock/gen.go +++ b/retrievalmarket/mock/gen.go @@ -1,4 +1,4 @@ package mock -//go:generate go run github.com/golang/mock/mockgen -destination=./piecestore.go -package=mock github.com/filecoin-project/boost/node/modules/piecestore PieceStore +//go:generate go run github.com/golang/mock/mockgen -destination=./piecestore.go -package=mock github.com/filecoin-project/boost/markets/piecestore PieceStore //go:generate go run github.com/golang/mock/mockgen -destination=./retrievalmarket.go -package=mock github.com/filecoin-project/Boost/retrievalmarket/legacyretrievaltypes RetrievalProvider,SectorAccessor diff --git a/retrievalmarket/mock/piecestore.go b/retrievalmarket/mock/piecestore.go index 383d72c7e..4ad67e41e 100644 --- a/retrievalmarket/mock/piecestore.go +++ b/retrievalmarket/mock/piecestore.go @@ -1,17 +1,17 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/filecoin-project/boost/node/modules/piecestore (interfaces: PieceStore) +// Source: github.com/filecoin-project/boost/markets/piecestore (interfaces: PieceStore) // Package mock is a generated GoMock package. package mock import ( - "context" - "reflect" + context "context" + reflect "reflect" - "github.com/filecoin-project/boost/markets/piecestore" - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/golang/mock/gomock" - "github.com/ipfs/go-cid" + piecestore "github.com/filecoin-project/boost/markets/piecestore" + shared "github.com/filecoin-project/boost/markets/shared" + gomock "github.com/golang/mock/gomock" + cid "github.com/ipfs/go-cid" ) // MockPieceStore is a mock of PieceStore interface. 
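// The two go:generate directives above are the source of truth for these mocks;
// after PieceStore moves to markets/piecestore they can be rebuilt with the
// standard Go toolchain (a usage sketch, assuming mockgen is available as a
// module dependency as the directives expect):
//
//	go generate ./retrievalmarket/mock/...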
diff --git a/retrievalmarket/server/gsunpaidretrieval.go b/retrievalmarket/server/gsunpaidretrieval.go index e25f9548c..0e166aa92 100644 --- a/retrievalmarket/server/gsunpaidretrieval.go +++ b/retrievalmarket/server/gsunpaidretrieval.go @@ -7,7 +7,7 @@ import ( "sync" graphsync "github.com/filecoin-project/boost-graphsync" - datatransfer2 "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/datatransfer" "github.com/filecoin-project/boost/datatransfer/encoding" "github.com/filecoin-project/boost/datatransfer/message" "github.com/filecoin-project/boost/datatransfer/network" @@ -16,6 +16,7 @@ import ( "github.com/filecoin-project/boost/metrics" "github.com/filecoin-project/boost/piecedirectory" "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes/migrations" "github.com/filecoin-project/go-state-types/abi" "github.com/hannahhoward/go-pubsub" logging "github.com/ipfs/go-log/v2" @@ -35,7 +36,7 @@ var incomingReqExtensions = []graphsync.ExtensionName{ // Uniquely identify a request (requesting peer + data transfer id) type reqId struct { p peer.ID - id datatransfer2.TransferID + id datatransfer.TransferID } // GraphsyncUnpaidRetrieval intercepts incoming requests to Graphsync. @@ -78,7 +79,7 @@ func (rag *retrievalAskGetter) GetAsk() *legacyretrievaltypes.Ask { return &rag.ask } -func NewRetrievalAskGetter() *retrievalAskGetter { +func NewRetrievalAskGetter() RetrievalAskGetter { return &retrievalAskGetter{ ask: legacyretrievaltypes.Ask{ PricePerByte: abi.NewTokenAmount(0), @@ -100,7 +101,10 @@ func NewGraphsyncUnpaidRetrieval(peerID peer.ID, gs graphsync.GraphExchange, dtn if err != nil { return nil, err } - + err = typeRegistry.Register(&migrations.DealProposal0{}, nil) + if err != nil { + return nil, err + } return &GraphsyncUnpaidRetrieval{ GraphExchange: gs, peerID: peerID, @@ -121,7 +125,7 @@ func (g *GraphsyncUnpaidRetrieval) Start(ctx context.Context) error { } // Called when a new request is received -func (g *GraphsyncUnpaidRetrieval) trackTransfer(p peer.ID, id datatransfer2.TransferID, state *retrievalState) { +func (g *GraphsyncUnpaidRetrieval) trackTransfer(p peer.ID, id datatransfer.TransferID, state *retrievalState) { // Record the transfer as an active retrieval so we can distinguish between // retrievals intercepted by this class, and those passed through to the // paid retrieval implementation. 
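// The comment above is the crux of how unpaid retrievals are separated from paid
// ones: later graphsync callbacks consult the activeRetrievals map keyed by
// reqId. A minimal sketch of that lookup, assuming the map and lock fields shown
// in this diff (the real helper in this package may differ in detail):
//
//	func (g *GraphsyncUnpaidRetrieval) isActiveUnpaidRetrieval(id reqId) (*retrievalState, bool) {
//		g.activeRetrievalsLk.Lock()
//		defer g.activeRetrievalsLk.Unlock()
//		state, ok := g.activeRetrievals[id]
//		return state, ok
//	}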
@@ -137,7 +141,7 @@ func (g *GraphsyncUnpaidRetrieval) trackTransfer(p peer.ID, id datatransfer2.Tra // Called when a request completes (either successfully or in failure) // TODO: Make sure that untrackTransfer is always called eventually // (may need to add a timeout) -func (g *GraphsyncUnpaidRetrieval) untrackTransfer(p peer.ID, id datatransfer2.TransferID) { +func (g *GraphsyncUnpaidRetrieval) untrackTransfer(p peer.ID, id datatransfer.TransferID) { g.activeRetrievalsLk.Lock() delete(g.activeRetrievals, reqId{p: p, id: id}) g.activeRetrievalsLk.Unlock() @@ -145,7 +149,7 @@ func (g *GraphsyncUnpaidRetrieval) untrackTransfer(p peer.ID, id datatransfer2.T g.dtnet.Unprotect(p, fmt.Sprintf("%d", id)) } -func (g *GraphsyncUnpaidRetrieval) CancelTransfer(ctx context.Context, id datatransfer2.TransferID, p *peer.ID) error { +func (g *GraphsyncUnpaidRetrieval) CancelTransfer(ctx context.Context, id datatransfer.TransferID, p *peer.ID) error { g.activeRetrievalsLk.Lock() var state *retrievalState @@ -242,7 +246,7 @@ func (g *GraphsyncUnpaidRetrieval) interceptRetrieval(p peer.ID, request graphsy return false, nil } - dtRequest := msg.(datatransfer2.Request) + dtRequest := msg.(datatransfer.Request) if !dtRequest.IsNew() && !dtRequest.IsRestart() { // The request is not for a new retrieval (it's a cancel etc). // If this message is for an existing unpaid retrieval it will already @@ -255,15 +259,29 @@ func (g *GraphsyncUnpaidRetrieval) interceptRetrieval(p peer.ID, request graphsy // The request is for a new transfer / restart transfer, so check if it's // for an unpaid retrieval. We are explicitly checking for voucher type to be // legacyretrievaltypes.DealProposal{}. Rest are all rejected at this stage. - _, decodeErr := g.decodeVoucher(dtRequest, g.decoder) + voucher, decodeErr := g.decodeVoucher(dtRequest, g.decoder) if decodeErr != nil { - return false, fmt.Errorf("decoding new request voucher: %w", decodeErr) + // If we don't recognize the voucher, don't intercept the retrieval. + // Instead it will be passed through to the legacy code for processing. 
+ if !errors.Is(decodeErr, unknownVoucherErr) { + return false, fmt.Errorf("decoding new request voucher: %w", decodeErr) + } + } + switch v := voucher.(type) { + case *legacyretrievaltypes.DealProposal: + // This is a retrieval deal + proposal := *v + return g.handleRetrievalDeal(p, msg, proposal, request, RetrievalTypeDeal) + case *migrations.DealProposal0: + // This is a retrieval deal with an older format + proposal := migrations.MigrateDealProposal0To1(*v) + return g.handleRetrievalDeal(p, msg, proposal, request, RetrievalTypeDeal) } - return g.handleRetrievalDeal(p, msg, legacyretrievaltypes.DealProposal{}, request, RetrievalTypeDeal) + return false, nil } -func (g *GraphsyncUnpaidRetrieval) handleRetrievalDeal(peerID peer.ID, msg datatransfer2.Message, proposal legacyretrievaltypes.DealProposal, request graphsync.RequestData, retType RetrievalType) (bool, error) { +func (g *GraphsyncUnpaidRetrieval) handleRetrievalDeal(peerID peer.ID, msg datatransfer.Message, proposal legacyretrievaltypes.DealProposal, request graphsync.RequestData, retType RetrievalType) (bool, error) { // If it's a paid retrieval, do not intercept it if !proposal.UnsealPrice.IsZero() || !proposal.PricePerByte.IsZero() { return false, nil @@ -281,13 +299,13 @@ func (g *GraphsyncUnpaidRetrieval) handleRetrievalDeal(peerID peer.ID, msg datat selector: &cbg.Deferred{Raw: selBytes}, sender: g.peerID, recipient: peerID, - status: datatransfer2.Requested, + status: datatransfer.Requested, isPull: true, } mktsState := &legacyretrievaltypes.ProviderDealState{ DealProposal: proposal, - ChannelID: &datatransfer2.ChannelID{ID: msg.TransferID(), Initiator: peerID, Responder: g.peerID}, + ChannelID: &datatransfer.ChannelID{ID: msg.TransferID(), Initiator: peerID, Responder: g.peerID}, Status: legacyretrievaltypes.DealStatusNew, Receiver: peerID, FundsReceived: abi.NewTokenAmount(0), @@ -304,7 +322,7 @@ func (g *GraphsyncUnpaidRetrieval) handleRetrievalDeal(peerID peer.ID, msg datat g.trackTransfer(peerID, msg.TransferID(), state) // Fire transfer queued event - g.publishDTEvent(datatransfer2.TransferRequestQueued, "", cs) + g.publishDTEvent(datatransfer.TransferRequestQueued, "", cs) // This is an unpaid retrieval, so this class is responsible for // handling it @@ -336,7 +354,7 @@ func (g *GraphsyncUnpaidRetrieval) RegisterIncomingRequestHook(hook graphsync.On if msg.IsRestart() { dtOpenMsg += " (restart)" } - g.publishDTEvent(datatransfer2.Open, dtOpenMsg, state.cs) + g.publishDTEvent(datatransfer.Open, dtOpenMsg, state.cs) g.publishMktsEvent(legacyretrievaltypes.ProviderEventOpen, *state.mkts) err := func() error { @@ -350,7 +368,7 @@ func (g *GraphsyncUnpaidRetrieval) RegisterIncomingRequestHook(hook graphsync.On isAccepted := validateErr == nil const isPaused = false // There are no payments required, so never pause - resultType := datatransfer2.EmptyTypeIdentifier + resultType := datatransfer.EmptyTypeIdentifier if res != nil { resultType = res.Type() } @@ -391,8 +409,8 @@ func (g *GraphsyncUnpaidRetrieval) RegisterIncomingRequestHook(hook graphsync.On hookActions.ValidateRequest() // Fire events - state.cs.status = datatransfer2.Ongoing - g.publishDTEvent(datatransfer2.Accept, "", state.cs) + state.cs.status = datatransfer.Ongoing + g.publishDTEvent(datatransfer.Accept, "", state.cs) state.mkts.Status = legacyretrievaltypes.DealStatusUnsealing g.publishMktsEvent(legacyretrievaltypes.ProviderEventDealAccepted, *state.mkts) state.mkts.Status = legacyretrievaltypes.DealStatusUnsealed @@ -477,8 +495,8 @@ func (g 
*GraphsyncUnpaidRetrieval) RegisterCompletedResponseListener(listener gr return } - state.cs.status = datatransfer2.Completed - g.publishDTEvent(datatransfer2.Complete, "", state.cs) + state.cs.status = datatransfer.Completed + g.publishDTEvent(datatransfer.Complete, "", state.cs) // Fire markets blocks completed event state.mkts.Status = legacyretrievaltypes.DealStatusCompleted g.publishMktsEvent(legacyretrievaltypes.ProviderEventComplete, *state.mkts) @@ -500,8 +518,8 @@ func (g *GraphsyncUnpaidRetrieval) RegisterRequestorCancelledListener(listener g return } - state.cs.status = datatransfer2.Cancelled - g.publishDTEvent(datatransfer2.Cancel, "client cancelled", state.cs) + state.cs.status = datatransfer.Cancelled + g.publishDTEvent(datatransfer.Cancel, "client cancelled", state.cs) state.mkts.Status = legacyretrievaltypes.DealStatusCancelled g.publishMktsEvent(legacyretrievaltypes.ProviderEventCancelComplete, *state.mkts) @@ -537,7 +555,7 @@ func (g *GraphsyncUnpaidRetrieval) RegisterBlockSentListener(listener graphsync. // Fire block sent event state.cs.sent += block.BlockSizeOnWire() - g.publishDTEvent(datatransfer2.DataSent, "", state.cs) + g.publishDTEvent(datatransfer.DataSent, "", state.cs) state.mkts.TotalSent += block.BlockSizeOnWire() stats.Record(g.ctx, metrics.GraphsyncRequestBlockSentCount.M(1)) @@ -571,9 +589,9 @@ func (g *GraphsyncUnpaidRetrieval) RegisterNetworkErrorListener(listener graphsy } func (g *GraphsyncUnpaidRetrieval) failTransfer(state *retrievalState, err error) { - state.cs.status = datatransfer2.Failed + state.cs.status = datatransfer.Failed state.cs.message = err.Error() - g.publishDTEvent(datatransfer2.Error, err.Error(), state.cs) + g.publishDTEvent(datatransfer.Error, err.Error(), state.cs) state.mkts.Status = legacyretrievaltypes.DealStatusErrored g.publishMktsEvent(legacyretrievaltypes.ProviderEventDataTransferError, *state.mkts) @@ -583,7 +601,7 @@ func (g *GraphsyncUnpaidRetrieval) failTransfer(state *retrievalState, err error var unknownVoucherErr = errors.New("unknown voucher type") -func (g *GraphsyncUnpaidRetrieval) decodeVoucher(request datatransfer2.Request, registry *registry.Registry) (datatransfer2.Voucher, error) { +func (g *GraphsyncUnpaidRetrieval) decodeVoucher(request datatransfer.Request, registry *registry.Registry) (datatransfer.Voucher, error) { vtypStr := request.VoucherType() decoder, has := registry.Decoder(vtypStr) if !has { @@ -593,10 +611,10 @@ func (g *GraphsyncUnpaidRetrieval) decodeVoucher(request datatransfer2.Request, if err != nil { return nil, err } - return encodable.(datatransfer2.Registerable), nil + return encodable.(datatransfer.Registerable), nil } -func (g *GraphsyncUnpaidRetrieval) isRequestForActiveUnpaidRetrieval(p peer.ID, request graphsync.RequestData) (datatransfer2.Request, *retrievalState, bool) { +func (g *GraphsyncUnpaidRetrieval) isRequestForActiveUnpaidRetrieval(p peer.ID, request graphsync.RequestData) (datatransfer.Request, *retrievalState, bool) { // Extract the data transfer message from the Graphsync request msg, err := extension.GetTransferData(request, defaultExtensions) if err != nil { @@ -613,7 +631,7 @@ func (g *GraphsyncUnpaidRetrieval) isRequestForActiveUnpaidRetrieval(p peer.ID, return nil, nil, false } - dtRequest := msg.(datatransfer2.Request) + dtRequest := msg.(datatransfer.Request) state, ok := g.isActiveUnpaidRetrieval(reqId{p: p, id: msg.TransferID()}) return dtRequest, state, ok } diff --git a/retrievalmarket/types/voucher_legs.go b/retrievalmarket/types/voucher_legs.go index 
f92747097..b8f629e07 100644 --- a/retrievalmarket/types/voucher_legs.go +++ b/retrievalmarket/types/voucher_legs.go @@ -1,24 +1,18 @@ package types -import ( - datatransfer "github.com/filecoin-project/go-data-transfer" - datatransfer2 "github.com/filecoin-project/go-data-transfer/v2" - "github.com/ipni/go-libipni/dagsync/dtsync" -) - -type LegsVoucherDTv1 struct { - dtsync.Voucher -} - -func (l *LegsVoucherDTv1) Type() datatransfer.TypeIdentifier { - return datatransfer.TypeIdentifier(dtsync.LegsVoucherType) -} - -type LegsVoucherResultDtv1 struct { - VoucherType datatransfer2.TypeIdentifier - dtsync.VoucherResult -} - -func (d *LegsVoucherResultDtv1) Type() datatransfer.TypeIdentifier { - return datatransfer.TypeIdentifier(d.VoucherType) -} +//type LegsVoucherDTv1 struct { +// dtsync.Voucher +//} +// +//func (l *LegsVoucherDTv1) Type() datatransfer.TypeIdentifier { +// return datatransfer.TypeIdentifier(dtsync.LegsVoucherType) +//} +// +//type LegsVoucherResultDtv1 struct { +// VoucherType datatransfer2.TypeIdentifier +// dtsync.VoucherResult +//} +// +//func (d *LegsVoucherResultDtv1) Type() datatransfer.TypeIdentifier { +// return datatransfer.TypeIdentifier(d.VoucherType) +//} From c1f510e48e0ad41a8144cf09c88c2d9cb29bcf65 Mon Sep 17 00:00:00 2001 From: Rod Vagg Date: Wed, 25 Oct 2023 18:35:52 +1100 Subject: [PATCH 11/34] chore: lots more dt & gs debugging (#1776) --- Makefile | 2 +- datatransfer/transport/graphsync/graphsync.go | 8 +++++-- itests/framework/log.go | 3 +++ node/modules/graphsync.go | 1 + retrievalmarket/server/gsunpaidretrieval.go | 23 +++++++++++++++---- 5 files changed, 30 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index bd7028d75..caadea98b 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,7 @@ $(warning Your Golang version is go$(shell expr $(GOVERSION) / 1000000).$(shell $(error Update Golang to version to at least 1.20.0) endif -ALLOWED_NODE_VERSIONS := 16 18 +ALLOWED_NODE_VERSIONS := 16 18 20 validate-node-version: ifeq ($(filter $(shell node -v | cut -c2-3),$(ALLOWED_NODE_VERSIONS)),) @echo "Unsupported Node.js version. Please install one of the following versions: $(ALLOWED_NODE_VERSIONS)" diff --git a/datatransfer/transport/graphsync/graphsync.go b/datatransfer/transport/graphsync/graphsync.go index 3009b7941..b3f8e0058 100644 --- a/datatransfer/transport/graphsync/graphsync.go +++ b/datatransfer/transport/graphsync/graphsync.go @@ -570,15 +570,19 @@ func (t *Transport) gsReqQueuedHook(p peer.ID, request graphsync.RequestData, ho // gsReqRecdHook is called when graphsync receives an incoming request for data func (t *Transport) gsReqRecdHook(p peer.ID, request graphsync.RequestData, hookActions graphsync.IncomingRequestHookActions) { + log.Infow("received incoming request", "request", request) + // if this is a push request the sender is us. msg, err := extension.GetTransferData(request, t.supportedExtensions) if err != nil { + log.Debugw("failed GetTransferData", "request", request, "err", err) hookActions.TerminateWithError(err) return } // extension not found; probably not our request. if msg == nil { + log.Debugw("no transfer data", "request", request) return } @@ -629,6 +633,7 @@ func (t *Transport) gsReqRecdHook(p peer.ID, request graphsync.RequestData, hook // protocol versions out there. 
extensions, extensionErr := extension.ToExtensionData(responseMessage, incomingReqExtensions) if extensionErr != nil { + log.Debugw("failed to convert extension data", "err", extensionErr) hookActions.TerminateWithError(err) return } @@ -638,6 +643,7 @@ func (t *Transport) gsReqRecdHook(p peer.ID, request graphsync.RequestData, hook } if err != nil && err != datatransfer.ErrPause { + log.Debugw("failed to process request", "err", err) hookActions.TerminateWithError(err) return } @@ -749,7 +755,6 @@ func (t *Transport) gsRequestUpdatedHook(p peer.ID, request graphsync.RequestDat if err != nil && err != datatransfer.ErrPause { hookActions.TerminateWithError(err) } - } // gsIncomingResponseHook is a graphsync.OnIncomingResponseHook. We use it to pass on responses @@ -787,7 +792,6 @@ func (t *Transport) gsIncomingResponseHook(p peer.ID, response graphsync.Respons } func (t *Transport) processExtension(chid datatransfer.ChannelID, gsMsg extension.GsExtended, p peer.ID, exts []graphsync.ExtensionName) (datatransfer.Message, error) { - // if this is a push request the sender is us. msg, err := extension.GetTransferData(gsMsg, exts) if err != nil { diff --git a/itests/framework/log.go b/itests/framework/log.go index c6668bd6b..219b4927b 100644 --- a/itests/framework/log.go +++ b/itests/framework/log.go @@ -6,6 +6,9 @@ func SetLogLevel() { _ = logging.SetLogLevel("boosttest", "DEBUG") _ = logging.SetLogLevel("devnet", "DEBUG") _ = logging.SetLogLevel("boost", "DEBUG") + _ = logging.SetLogLevel("graphsync", "DEBUG") + _ = logging.SetLogLevel("boostgs", "DEBUG") + _ = logging.SetLogLevel("dt_graphsync", "DEBUG") _ = logging.SetLogLevel("provider", "DEBUG") _ = logging.SetLogLevel("http-transfer", "DEBUG") _ = logging.SetLogLevel("boost-provider", "DEBUG") diff --git a/node/modules/graphsync.go b/node/modules/graphsync.go index 106e270e1..a63bd1d71 100644 --- a/node/modules/graphsync.go +++ b/node/modules/graphsync.go @@ -40,6 +40,7 @@ func RetrievalGraphsync(parallelTransfersForStorage uint64, parallelTransfersFor SectorAccessor: sa, AskStore: askGetter, } + gsupr, err := server.NewGraphsyncUnpaidRetrieval(h.ID(), gs, net, vdeps) if err != nil { return nil, err diff --git a/retrievalmarket/server/gsunpaidretrieval.go b/retrievalmarket/server/gsunpaidretrieval.go index 0e166aa92..5ebea3236 100644 --- a/retrievalmarket/server/gsunpaidretrieval.go +++ b/retrievalmarket/server/gsunpaidretrieval.go @@ -25,8 +25,10 @@ import ( "go.opencensus.io/stats" ) -var log = logging.Logger("boostgs") -var ErrRetrievalNotFound = fmt.Errorf("no transfer found") +var ( + log = logging.Logger("boostgs") + ErrRetrievalNotFound = fmt.Errorf("no transfer found") +) var incomingReqExtensions = []graphsync.ExtensionName{ extension.ExtensionIncomingRequest1_1, @@ -211,6 +213,7 @@ func (g *GraphsyncUnpaidRetrieval) List() []retrievalState { // Called when a transfer is received by graphsync and queued for processing func (g *GraphsyncUnpaidRetrieval) RegisterIncomingRequestQueuedHook(hook graphsync.OnIncomingRequestQueuedHook) graphsync.UnregisterHookFunc { return g.GraphExchange.RegisterIncomingRequestQueuedHook(func(p peer.ID, request graphsync.RequestData, hookActions graphsync.RequestQueuedHookActions) { + log.Debugw("incoming request queued", "request", request) stats.Record(g.ctx, metrics.GraphsyncRequestQueuedCount.M(1)) interceptRtvl, err := g.interceptRetrieval(p, request) @@ -237,12 +240,14 @@ func (g *GraphsyncUnpaidRetrieval) interceptRetrieval(p peer.ID, request graphsy } // Extension not found, ignore if msg == 
nil { + log.Debugw("no extension found", "request", request) return false, nil } // When a data transfer request comes in on graphsync, the remote peer // initiated a pull request for data. If it's not a request, ignore it. if !msg.IsRequest() { + log.Debugw("ignoring non-request message", "request", request) return false, nil } @@ -253,6 +258,7 @@ func (g *GraphsyncUnpaidRetrieval) interceptRetrieval(p peer.ID, request graphsy // be in our map (because we must have already processed the new // retrieval request) _, ok := g.isActiveUnpaidRetrieval(reqId{p: p, id: msg.TransferID()}) + log.Debugw("ignoring non-new request", "request", request, "isActiveUnpaidRetrieval", ok) return ok, nil } @@ -263,6 +269,7 @@ func (g *GraphsyncUnpaidRetrieval) interceptRetrieval(p peer.ID, request graphsy if decodeErr != nil { // If we don't recognize the voucher, don't intercept the retrieval. // Instead it will be passed through to the legacy code for processing. + log.Debugw("decoding new request voucher", "request", request, "err", decodeErr) if !errors.Is(decodeErr, unknownVoucherErr) { return false, fmt.Errorf("decoding new request voucher: %w", decodeErr) } @@ -271,13 +278,16 @@ func (g *GraphsyncUnpaidRetrieval) interceptRetrieval(p peer.ID, request graphsy case *legacyretrievaltypes.DealProposal: // This is a retrieval deal proposal := *v + log.Debugw("intercepting retrieval deal", "proposal", proposal) return g.handleRetrievalDeal(p, msg, proposal, request, RetrievalTypeDeal) case *migrations.DealProposal0: // This is a retrieval deal with an older format proposal := migrations.MigrateDealProposal0To1(*v) + log.Debugw("intercepting retrieval deal v1", "proposal", proposal) return g.handleRetrievalDeal(p, msg, proposal, request, RetrievalTypeDeal) } + log.Debugw("ignoring request", "request", request) return false, nil } @@ -332,6 +342,7 @@ func (g *GraphsyncUnpaidRetrieval) handleRetrievalDeal(peerID peer.ID, msg datat // Called by graphsync when an incoming request is processed func (g *GraphsyncUnpaidRetrieval) RegisterIncomingRequestHook(hook graphsync.OnIncomingRequestHook) graphsync.UnregisterHookFunc { return g.GraphExchange.RegisterIncomingRequestHook(func(p peer.ID, request graphsync.RequestData, hookActions graphsync.IncomingRequestHookActions) { + log.Debugw("incoming request", "request", request) stats.Record(g.ctx, metrics.GraphsyncRequestStartedCount.M(1)) // Check if this is a request for a retrieval that we should handle @@ -340,6 +351,7 @@ func (g *GraphsyncUnpaidRetrieval) RegisterIncomingRequestHook(hook graphsync.On // Otherwise pass it through to the legacy code hook(p, request, hookActions) stats.Record(g.ctx, metrics.GraphsyncRequestStartedPaidCount.M(1)) + log.Debugw("passing paid request through to legacy code", "request", request) return } @@ -354,17 +366,20 @@ func (g *GraphsyncUnpaidRetrieval) RegisterIncomingRequestHook(hook graphsync.On if msg.IsRestart() { dtOpenMsg += " (restart)" } + log.Debugw("handling unpaid request", "request", request, "msg", msg, "state", state) g.publishDTEvent(datatransfer.Open, dtOpenMsg, state.cs) g.publishMktsEvent(legacyretrievaltypes.ProviderEventOpen, *state.mkts) err := func() error { voucher, decodeErr := g.decodeVoucher(msg, g.decoder) if decodeErr != nil { + log.Debugw("decoding new request voucher", "request", request, "err", decodeErr) return fmt.Errorf("decoding new request voucher: %w", decodeErr) } // Validate the request res, validateErr := g.validator.validatePullRequest(msg.IsRestart(), p, voucher, request.Root(), 
request.Selector()) + log.Debugw("validating request", "request", request, "result", res, "err", validateErr) isAccepted := validateErr == nil const isPaused = false // There are no payments required, so never pause @@ -397,8 +412,8 @@ func (g *GraphsyncUnpaidRetrieval) RegisterIncomingRequestHook(hook graphsync.On return validateErr }() - if err != nil { + log.Debugw("validation failed", "request", request, "err", err) hookActions.TerminateWithError(err) g.failTransfer(state, err) stats.Record(g.ctx, metrics.GraphsyncRequestStartedUnpaidFailCount.M(1)) @@ -417,6 +432,7 @@ func (g *GraphsyncUnpaidRetrieval) RegisterIncomingRequestHook(hook graphsync.On g.publishMktsEvent(legacyretrievaltypes.ProviderEventUnsealComplete, *state.mkts) stats.Record(g.ctx, metrics.GraphsyncRequestStartedUnpaidSuccessCount.M(1)) + log.Debugw("successfully validated request", "request", request) }) } @@ -508,7 +524,6 @@ func (g *GraphsyncUnpaidRetrieval) RegisterCompletedResponseListener(listener gr func (g *GraphsyncUnpaidRetrieval) RegisterRequestorCancelledListener(listener graphsync.OnRequestorCancelledListener) graphsync.UnregisterHookFunc { return g.GraphExchange.RegisterRequestorCancelledListener(func(p peer.ID, request graphsync.RequestData) { - stats.Record(g.ctx, metrics.GraphsyncRequestClientCancelledCount.M(1)) _, state, intercept := g.isRequestForActiveUnpaidRetrieval(p, request) From ce036d62bc38594db6682ea8ca81bd9f09d1ee22 Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Tue, 31 Oct 2023 17:18:38 +0400 Subject: [PATCH 12/34] fix lint errs, add back ProviderDataTransfer --- itests/dummydeal_offline_test.go | 4 ++ itests/framework/framework.go | 27 ++++++------- itests/graphsync_identity_cid_test.go | 1 + node/builder.go | 2 +- retrievalmarket/server/datatransfer.go | 52 ++++++++++++++++++++++++++ 5 files changed, 72 insertions(+), 14 deletions(-) create mode 100644 retrievalmarket/server/datatransfer.go diff --git a/itests/dummydeal_offline_test.go b/itests/dummydeal_offline_test.go index 4abf1ceac..32df3219d 100644 --- a/itests/dummydeal_offline_test.go +++ b/itests/dummydeal_offline_test.go @@ -45,4 +45,8 @@ func TestDummydealOffline(t *testing.T) { require.True(t, res.Accepted) err = f.WaitForDealAddedToSector(offlineDealUuid) require.NoError(t, err) + + // rootCid is an identity CID + outFile := f.Retrieve(ctx, t, tempdir, rootCid, dealRes.DealParams.ClientDealProposal.Proposal.PieceCID, true, nil) + kit.AssertFilesEqual(t, randomFilepath, outFile) } diff --git a/itests/framework/framework.go b/itests/framework/framework.go index a60ff29af..a91ea1d43 100644 --- a/itests/framework/framework.go +++ b/itests/framework/framework.go @@ -880,23 +880,24 @@ func (f *TestFramework) Retrieve(ctx context.Context, t *testing.T, tempdir stri if !extractCar { // Write file as car file - file, err := os.CreateTemp(path.Join(tempdir, "retrievals"), "*"+root.String()+".car") - require.NoError(t, err) + file, err1 := os.CreateTemp(path.Join(tempdir, "retrievals"), "*"+root.String()+".car") + require.NoError(t, err1) out = file.Name() - err = car.WriteCar(ctx, dservOffline, []cid.Cid{root}, file) - require.NoError(t, err) + err1 = car.WriteCar(ctx, dservOffline, []cid.Cid{root}, file) + require.NoError(t, err1) } else { // Otherwise write file as UnixFS File - ufsFile, err := unixfile.NewUnixfsFile(ctx, dservOffline, dnode) - require.NoError(t, err) - file, err := os.CreateTemp(path.Join(tempdir, "retrievals"), "*"+root.String()) - err = file.Close() - require.NoError(t, err) - err = os.Remove(file.Name()) - 
require.NoError(t, err) - err = files.WriteTo(ufsFile, file.Name()) - require.NoError(t, err) + ufsFile, err1 := unixfile.NewUnixfsFile(ctx, dservOffline, dnode) + require.NoError(t, err1) + file, err1 := os.CreateTemp(path.Join(tempdir, "retrievals"), "*"+root.String()) + require.NoError(t, err1) + err1 = file.Close() + require.NoError(t, err1) + err1 = os.Remove(file.Name()) + require.NoError(t, err1) + err1 = files.WriteTo(ufsFile, file.Name()) + require.NoError(t, err1) } diff --git a/itests/graphsync_identity_cid_test.go b/itests/graphsync_identity_cid_test.go index 88c0a32f5..daa6b9ca7 100644 --- a/itests/graphsync_identity_cid_test.go +++ b/itests/graphsync_identity_cid_test.go @@ -114,6 +114,7 @@ func TestDealAndRetrievalWithIdentityCID(t *testing.T) { log.Infof("deal ID is : %s", dealCid.String()) // Wait for the first deal to be added to a sector and cleaned up so space is made err = f.WaitForDealAddedToSector(dealUuid) + require.NoError(t, err) time.Sleep(5 * time.Second) diff --git a/node/builder.go b/node/builder.go index ce894fb69..b180a12a6 100644 --- a/node/builder.go +++ b/node/builder.go @@ -536,6 +536,7 @@ func ConfigBoost(cfg *config.Boost) Option { // Lotus Markets Override(new(dtypes.ProviderTransferNetwork), modules.NewProviderTransferNetwork), + Override(new(server.ProviderDataTransfer), server.NewProviderDataTransfer), Override(new(server.RetrievalAskGetter), server.NewRetrievalAskGetter), Override(new(*server.GraphsyncUnpaidRetrieval), modules.RetrievalGraphsync(cfg.LotusDealmaking.SimultaneousTransfersForStorage, cfg.LotusDealmaking.SimultaneousTransfersForStoragePerClient, cfg.LotusDealmaking.SimultaneousTransfersForRetrieval)), Override(new(dtypes.StagingGraphsync), From(new(*server.GraphsyncUnpaidRetrieval))), @@ -543,7 +544,6 @@ func ConfigBoost(cfg *config.Boost) Option { // Lotus Markets (retrieval deps) Override(new(sealer.PieceProvider), sealer.NewPieceProvider), - Override(new(*bdclient.Store), modules.NewPieceDirectoryStore(cfg)), Override(new(*lib.MultiMinerAccessor), modules.NewMultiminerSectorAccessor(cfg)), Override(new(*piecedirectory.PieceDirectory), modules.NewPieceDirectory(cfg)), diff --git a/retrievalmarket/server/datatransfer.go b/retrievalmarket/server/datatransfer.go new file mode 100644 index 000000000..ed7ca9f98 --- /dev/null +++ b/retrievalmarket/server/datatransfer.go @@ -0,0 +1,52 @@ +package server + +import ( + "context" + "errors" + "time" + + "github.com/filecoin-project/boost/datatransfer" + dtimpl "github.com/filecoin-project/boost/datatransfer/impl" + marketevents "github.com/filecoin-project/boost/markets/loggers" + "github.com/filecoin-project/boost/node/modules/dtypes" + lotus_dtypes "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/repo" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + "go.uber.org/fx" +) + +type ProviderDataTransfer datatransfer.Manager + +// NewProviderDataTransfer returns a data transfer manager +func NewProviderDataTransfer(lc fx.Lifecycle, net dtypes.ProviderTransferNetwork, transport dtypes.ProviderTransport, ds lotus_dtypes.MetadataDS, r repo.LockedRepo) (ProviderDataTransfer, error) { + dtDs := namespace.Wrap(ds, datastore.NewKey("/datatransfer/provider/transfers")) + + dt, err := dtimpl.NewDataTransfer(dtDs, net, transport) + if err != nil { + return nil, err + } + + dt.OnReady(marketevents.ReadyLogger("provider data transfer")) + lc.Append(fx.Hook{ + OnStart: func(ctx context.Context) error { + 
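+			// Descriptive comment (added for clarity, not in the original patch): subscribe the market-events logger before starting the data transfer manager; the OnStop hook below bounds shutdown to 5 seconds.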
dt.SubscribeToEvents(marketevents.DataTransferLogger) + return dt.Start(ctx) + }, + OnStop: func(ctx context.Context) error { + errc := make(chan error) + + go func() { + errc <- dt.Stop(ctx) + }() + + select { + case err := <-errc: + return err + case <-time.After(5 * time.Second): + return errors.New("couldnt stop datatransfer.Manager in 5 seconds. forcing an App.Stop") + } + }, + }) + return dt, nil +} From c1b8b82d17676d849674bc5e881641606831faf5 Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Tue, 31 Oct 2023 18:36:01 +0400 Subject: [PATCH 13/34] regenrate cbor --- .../internal/internalchannel_cbor_gen.go | 694 +++++++++--------- go.mod | 2 +- node/builder.go | 3 +- 3 files changed, 359 insertions(+), 340 deletions(-) diff --git a/datatransfer/channels/internal/internalchannel_cbor_gen.go b/datatransfer/channels/internal/internalchannel_cbor_gen.go index 81c4ca710..ca27388a3 100644 --- a/datatransfer/channels/internal/internalchannel_cbor_gen.go +++ b/datatransfer/channels/internal/internalchannel_cbor_gen.go @@ -5,6 +5,7 @@ package internal import ( "fmt" "io" + "math" "sort" datatransfer "github.com/filecoin-project/boost/datatransfer" @@ -16,6 +17,7 @@ import ( var _ = xerrors.Errorf var _ = cid.Undef +var _ = math.E var _ = sort.Sort func (t *ChannelState) MarshalCBOR(w io.Writer) error { @@ -23,94 +25,97 @@ func (t *ChannelState) MarshalCBOR(w io.Writer) error { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{180}); err != nil { + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{180}); err != nil { return err } - scratch := make([]byte, 9) - - // t.SelfPeer (peer.ID) (string) - if len("SelfPeer") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"SelfPeer\" was too long") + // t.Sent (uint64) (uint64) + if len("Sent") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Sent\" was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("SelfPeer"))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Sent"))); err != nil { return err } - if _, err := io.WriteString(w, string("SelfPeer")); err != nil { + if _, err := cw.WriteString(string("Sent")); err != nil { return err } - if len(t.SelfPeer) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.SelfPeer was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.SelfPeer))); err != nil { - return err - } - if _, err := io.WriteString(w, string(t.SelfPeer)); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Sent)); err != nil { return err } - // t.TransferID (datatransfer.TransferID) (uint64) - if len("TransferID") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"TransferID\" was too long") + // t.Queued (uint64) (uint64) + if len("Queued") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Queued\" was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("TransferID"))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Queued"))); err != nil { return err } - if _, err := io.WriteString(w, string("TransferID")); err != nil { + if _, err := cw.WriteString(string("Queued")); err != nil { return err } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TransferID)); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Queued)); err != nil { return err } - // t.Initiator (peer.ID) (string) - 
if len("Initiator") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Initiator\" was too long") + // t.Sender (peer.ID) (string) + if len("Sender") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Sender\" was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Initiator"))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Sender"))); err != nil { return err } - if _, err := io.WriteString(w, string("Initiator")); err != nil { + if _, err := cw.WriteString(string("Sender")); err != nil { return err } - if len(t.Initiator) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Initiator was too long") + if len(t.Sender) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Sender was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Initiator))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Sender))); err != nil { return err } - if _, err := io.WriteString(w, string(t.Initiator)); err != nil { + if _, err := cw.WriteString(string(t.Sender)); err != nil { return err } - // t.Responder (peer.ID) (string) - if len("Responder") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Responder\" was too long") + // t.Stages (datatransfer.ChannelStages) (struct) + if len("Stages") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Stages\" was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Responder"))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Stages"))); err != nil { return err } - if _, err := io.WriteString(w, string("Responder")); err != nil { + if _, err := cw.WriteString(string("Stages")); err != nil { return err } - if len(t.Responder) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Responder was too long") + if err := t.Stages.MarshalCBOR(cw); err != nil { + return err + } + + // t.Status (datatransfer.Status) (uint64) + if len("Status") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Status\" was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Responder))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Status"))); err != nil { return err } - if _, err := io.WriteString(w, string(t.Responder)); err != nil { + if _, err := cw.WriteString(string("Status")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Status)); err != nil { return err } @@ -119,205 +124,219 @@ func (t *ChannelState) MarshalCBOR(w io.Writer) error { return xerrors.Errorf("Value in field \"BaseCid\" was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("BaseCid"))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("BaseCid"))); err != nil { return err } - if _, err := io.WriteString(w, string("BaseCid")); err != nil { + if _, err := cw.WriteString(string("BaseCid")); err != nil { return err } - if err := cbg.WriteCidBuf(scratch, w, t.BaseCid); err != nil { + if err := cbg.WriteCid(cw, t.BaseCid); err != nil { return xerrors.Errorf("failed to write cid field t.BaseCid: %w", err) } - // t.Selector (typegen.Deferred) (struct) - if len("Selector") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Selector\" was too long") + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return 
xerrors.Errorf("Value in field \"Message\" was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Selector"))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Message"))); err != nil { return err } - if _, err := io.WriteString(w, string("Selector")); err != nil { + if _, err := cw.WriteString(string("Message")); err != nil { return err } - if err := t.Selector.MarshalCBOR(w); err != nil { - return err - } - - // t.Sender (peer.ID) (string) - if len("Sender") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Sender\" was too long") + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Sender"))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { return err } - if _, err := io.WriteString(w, string("Sender")); err != nil { + if _, err := cw.WriteString(string(t.Message)); err != nil { return err } - if len(t.Sender) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Sender was too long") + // t.Received (uint64) (uint64) + if len("Received") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Received\" was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Sender))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Received"))); err != nil { return err } - if _, err := io.WriteString(w, string(t.Sender)); err != nil { + if _, err := cw.WriteString(string("Received")); err != nil { return err } - // t.Recipient (peer.ID) (string) - if len("Recipient") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Recipient\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Recipient"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("Recipient")); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Received)); err != nil { return err } - if len(t.Recipient) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Recipient was too long") + // t.Selector (typegen.Deferred) (struct) + if len("Selector") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Selector\" was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Recipient))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Selector"))); err != nil { return err } - if _, err := io.WriteString(w, string(t.Recipient)); err != nil { + if _, err := cw.WriteString(string("Selector")); err != nil { return err } - // t.TotalSize (uint64) (uint64) - if len("TotalSize") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"TotalSize\" was too long") + if err := t.Selector.MarshalCBOR(cw); err != nil { + return err } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("TotalSize"))); err != nil { - return err + // t.SelfPeer (peer.ID) (string) + if len("SelfPeer") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SelfPeer\" was too long") } - if _, err := io.WriteString(w, string("TotalSize")); err != nil { + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SelfPeer"))); err != nil { return err } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TotalSize)); err != nil { + if _, err := 
cw.WriteString(string("SelfPeer")); err != nil { return err } - // t.Status (datatransfer.Status) (uint64) - if len("Status") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Status\" was too long") + if len(t.SelfPeer) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.SelfPeer was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Status"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("Status")); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.SelfPeer))); err != nil { return err } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + if _, err := cw.WriteString(string(t.SelfPeer)); err != nil { return err } - // t.Queued (uint64) (uint64) - if len("Queued") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Queued\" was too long") + // t.Vouchers ([]internal.EncodedVoucher) (slice) + if len("Vouchers") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Vouchers\" was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Queued"))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Vouchers"))); err != nil { return err } - if _, err := io.WriteString(w, string("Queued")); err != nil { + if _, err := cw.WriteString(string("Vouchers")); err != nil { return err } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Queued)); err != nil { + if len(t.Vouchers) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Vouchers was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Vouchers))); err != nil { return err } + for _, v := range t.Vouchers { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } - // t.Sent (uint64) (uint64) - if len("Sent") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Sent\" was too long") + // t.Initiator (peer.ID) (string) + if len("Initiator") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Initiator\" was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Sent"))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Initiator"))); err != nil { return err } - if _, err := io.WriteString(w, string("Sent")); err != nil { + if _, err := cw.WriteString(string("Initiator")); err != nil { return err } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Sent)); err != nil { + if len(t.Initiator) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Initiator was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Initiator))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Initiator)); err != nil { return err } - // t.Received (uint64) (uint64) - if len("Received") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Received\" was too long") + // t.Recipient (peer.ID) (string) + if len("Recipient") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Recipient\" was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Received"))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Recipient"))); err != nil { return err } - if _, err := io.WriteString(w, string("Received")); err != nil { + if _, err := cw.WriteString(string("Recipient")); err != nil { return 
err } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Received)); err != nil { + if len(t.Recipient) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Recipient was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Recipient))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Recipient)); err != nil { return err } - // t.Message (string) (string) - if len("Message") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Message\" was too long") + // t.Responder (peer.ID) (string) + if len("Responder") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Responder\" was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Message"))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Responder"))); err != nil { return err } - if _, err := io.WriteString(w, string("Message")); err != nil { + if _, err := cw.WriteString(string("Responder")); err != nil { return err } - if len(t.Message) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Message was too long") + if len(t.Responder) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Responder was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Message))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Responder))); err != nil { return err } - if _, err := io.WriteString(w, string(t.Message)); err != nil { + if _, err := cw.WriteString(string(t.Responder)); err != nil { return err } - // t.Vouchers ([]internal.EncodedVoucher) (slice) - if len("Vouchers") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Vouchers\" was too long") + // t.TotalSize (uint64) (uint64) + if len("TotalSize") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TotalSize\" was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Vouchers"))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("TotalSize"))); err != nil { return err } - if _, err := io.WriteString(w, string("Vouchers")); err != nil { + if _, err := cw.WriteString(string("TotalSize")); err != nil { return err } - if len(t.Vouchers) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Vouchers was too long") + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TotalSize)); err != nil { + return err } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Vouchers))); err != nil { + // t.TransferID (datatransfer.TransferID) (uint64) + if len("TransferID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TransferID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("TransferID"))); err != nil { return err } - for _, v := range t.Vouchers { - if err := v.MarshalCBOR(w); err != nil { - return err - } + if _, err := cw.WriteString(string("TransferID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TransferID)); err != nil { + return err } // t.VoucherResults ([]internal.EncodedVoucherResult) (slice) @@ -325,10 +344,10 @@ func (t *ChannelState) MarshalCBOR(w io.Writer) error { return xerrors.Errorf("Value in field \"VoucherResults\" was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("VoucherResults"))); err != nil { + if err := 
cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("VoucherResults"))); err != nil { return err } - if _, err := io.WriteString(w, string("VoucherResults")); err != nil { + if _, err := cw.WriteString(string("VoucherResults")); err != nil { return err } @@ -336,33 +355,33 @@ func (t *ChannelState) MarshalCBOR(w io.Writer) error { return xerrors.Errorf("Slice value in field t.VoucherResults was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.VoucherResults))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.VoucherResults))); err != nil { return err } for _, v := range t.VoucherResults { - if err := v.MarshalCBOR(w); err != nil { + if err := v.MarshalCBOR(cw); err != nil { return err } } - // t.ReceivedBlocksTotal (int64) (int64) - if len("ReceivedBlocksTotal") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"ReceivedBlocksTotal\" was too long") + // t.SentBlocksTotal (int64) (int64) + if len("SentBlocksTotal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SentBlocksTotal\" was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ReceivedBlocksTotal"))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SentBlocksTotal"))); err != nil { return err } - if _, err := io.WriteString(w, string("ReceivedBlocksTotal")); err != nil { + if _, err := cw.WriteString(string("SentBlocksTotal")); err != nil { return err } - if t.ReceivedBlocksTotal >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ReceivedBlocksTotal)); err != nil { + if t.SentBlocksTotal >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SentBlocksTotal)); err != nil { return err } } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.ReceivedBlocksTotal-1)); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.SentBlocksTotal-1)); err != nil { return err } } @@ -372,73 +391,62 @@ func (t *ChannelState) MarshalCBOR(w io.Writer) error { return xerrors.Errorf("Value in field \"QueuedBlocksTotal\" was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("QueuedBlocksTotal"))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("QueuedBlocksTotal"))); err != nil { return err } - if _, err := io.WriteString(w, string("QueuedBlocksTotal")); err != nil { + if _, err := cw.WriteString(string("QueuedBlocksTotal")); err != nil { return err } if t.QueuedBlocksTotal >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.QueuedBlocksTotal)); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.QueuedBlocksTotal)); err != nil { return err } } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.QueuedBlocksTotal-1)); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.QueuedBlocksTotal-1)); err != nil { return err } } - // t.SentBlocksTotal (int64) (int64) - if len("SentBlocksTotal") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"SentBlocksTotal\" was too long") + // t.ReceivedBlocksTotal (int64) (int64) + if len("ReceivedBlocksTotal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ReceivedBlocksTotal\" was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("SentBlocksTotal"))); err != nil { + if err := 
cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ReceivedBlocksTotal"))); err != nil { return err } - if _, err := io.WriteString(w, string("SentBlocksTotal")); err != nil { + if _, err := cw.WriteString(string("ReceivedBlocksTotal")); err != nil { return err } - if t.SentBlocksTotal >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SentBlocksTotal)); err != nil { + if t.ReceivedBlocksTotal >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ReceivedBlocksTotal)); err != nil { return err } } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SentBlocksTotal-1)); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.ReceivedBlocksTotal-1)); err != nil { return err } } - - // t.Stages (datatransfer.ChannelStages) (struct) - if len("Stages") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Stages\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Stages"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("Stages")); err != nil { - return err - } - - if err := t.Stages.MarshalCBOR(w); err != nil { - return err - } return nil } -func (t *ChannelState) UnmarshalCBOR(r io.Reader) error { +func (t *ChannelState) UnmarshalCBOR(r io.Reader) (err error) { *t = ChannelState{} - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) + cr := cbg.NewCborReader(r) - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + maj, extra, err := cr.ReadHeader() if err != nil { return err } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + if maj != cbg.MajMap { return fmt.Errorf("cbor input should be of type map") } @@ -453,7 +461,7 @@ func (t *ChannelState) UnmarshalCBOR(r io.Reader) error { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadStringBuf(br, scratch) + sval, err := cbg.ReadString(cr) if err != nil { return err } @@ -462,113 +470,65 @@ func (t *ChannelState) UnmarshalCBOR(r io.Reader) error { } switch name { - // t.SelfPeer (peer.ID) (string) - case "SelfPeer": - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - t.SelfPeer = peer.ID(sval) - } - // t.TransferID (datatransfer.TransferID) (uint64) - case "TransferID": + // t.Sent (uint64) (uint64) + case "Sent": { - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + maj, extra, err = cr.ReadHeader() if err != nil { return err } if maj != cbg.MajUnsignedInt { return fmt.Errorf("wrong type for uint64 field") } - t.TransferID = datatransfer.TransferID(extra) + t.Sent = uint64(extra) } - // t.Initiator (peer.ID) (string) - case "Initiator": + // t.Queued (uint64) (uint64) + case "Queued": { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - t.Initiator = peer.ID(sval) - } - // t.Responder (peer.ID) (string) - case "Responder": - { - sval, err := cbg.ReadStringBuf(br, scratch) + maj, extra, err = cr.ReadHeader() if err != nil { return err } - - t.Responder = peer.ID(sval) - } - // t.BaseCid (cid.Cid) (struct) - case "BaseCid": - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.BaseCid: %w", err) + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") } + t.Queued = uint64(extra) - t.BaseCid = c - - } - // t.Selector (typegen.Deferred) (struct) - case "Selector": - - { - - t.Selector = new(cbg.Deferred) - - if err := t.Selector.UnmarshalCBOR(br); err != 
nil { - return xerrors.Errorf("failed to read deferred field: %w", err) - } } // t.Sender (peer.ID) (string) case "Sender": { - sval, err := cbg.ReadStringBuf(br, scratch) + sval, err := cbg.ReadString(cr) if err != nil { return err } t.Sender = peer.ID(sval) } - // t.Recipient (peer.ID) (string) - case "Recipient": - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - t.Recipient = peer.ID(sval) - } - // t.TotalSize (uint64) (uint64) - case "TotalSize": + // t.Stages (datatransfer.ChannelStages) (struct) + case "Stages": { - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + b, err := cr.ReadByte() if err != nil { return err } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Stages = new(datatransfer.ChannelStages) + if err := t.Stages.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Stages pointer: %w", err) + } } - t.TotalSize = uint64(extra) } // t.Status (datatransfer.Status) (uint64) @@ -576,7 +536,7 @@ func (t *ChannelState) UnmarshalCBOR(r io.Reader) error { { - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + maj, extra, err = cr.ReadHeader() if err != nil { return err } @@ -586,42 +546,36 @@ func (t *ChannelState) UnmarshalCBOR(r io.Reader) error { t.Status = datatransfer.Status(extra) } - // t.Queued (uint64) (uint64) - case "Queued": + // t.BaseCid (cid.Cid) (struct) + case "BaseCid": { - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + c, err := cbg.ReadCid(cr) if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") + return xerrors.Errorf("failed to read cid field t.BaseCid: %w", err) } - t.Queued = uint64(extra) + + t.BaseCid = c } - // t.Sent (uint64) (uint64) - case "Sent": + // t.Message (string) (string) + case "Message": { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + sval, err := cbg.ReadString(cr) if err != nil { return err } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Sent = uint64(extra) + t.Message = string(sval) } // t.Received (uint64) (uint64) case "Received": { - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + maj, extra, err = cr.ReadHeader() if err != nil { return err } @@ -631,21 +585,32 @@ func (t *ChannelState) UnmarshalCBOR(r io.Reader) error { t.Received = uint64(extra) } - // t.Message (string) (string) - case "Message": + // t.Selector (typegen.Deferred) (struct) + case "Selector": + + { + + t.Selector = new(cbg.Deferred) + + if err := t.Selector.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("failed to read deferred field: %w", err) + } + } + // t.SelfPeer (peer.ID) (string) + case "SelfPeer": { - sval, err := cbg.ReadStringBuf(br, scratch) + sval, err := cbg.ReadString(cr) if err != nil { return err } - t.Message = string(sval) + t.SelfPeer = peer.ID(sval) } // t.Vouchers ([]internal.EncodedVoucher) (slice) case "Vouchers": - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + maj, extra, err = cr.ReadHeader() if err != nil { return err } @@ -665,17 +630,80 @@ func (t *ChannelState) UnmarshalCBOR(r io.Reader) error { for i := 0; i < int(extra); i++ { var v EncodedVoucher - if err := v.UnmarshalCBOR(br); err != nil { + if err := v.UnmarshalCBOR(cr); err != nil { return err } t.Vouchers[i] = v } + // t.Initiator (peer.ID) (string) + case "Initiator": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return 
err + } + + t.Initiator = peer.ID(sval) + } + // t.Recipient (peer.ID) (string) + case "Recipient": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Recipient = peer.ID(sval) + } + // t.Responder (peer.ID) (string) + case "Responder": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Responder = peer.ID(sval) + } + // t.TotalSize (uint64) (uint64) + case "TotalSize": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.TotalSize = uint64(extra) + + } + // t.TransferID (datatransfer.TransferID) (uint64) + case "TransferID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.TransferID = datatransfer.TransferID(extra) + + } // t.VoucherResults ([]internal.EncodedVoucherResult) (slice) case "VoucherResults": - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + maj, extra, err = cr.ReadHeader() if err != nil { return err } @@ -695,17 +723,17 @@ func (t *ChannelState) UnmarshalCBOR(r io.Reader) error { for i := 0; i < int(extra); i++ { var v EncodedVoucherResult - if err := v.UnmarshalCBOR(br); err != nil { + if err := v.UnmarshalCBOR(cr); err != nil { return err } t.VoucherResults[i] = v } - // t.ReceivedBlocksTotal (int64) (int64) - case "ReceivedBlocksTotal": + // t.SentBlocksTotal (int64) (int64) + case "SentBlocksTotal": { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + maj, extra, err := cr.ReadHeader() var extraI int64 if err != nil { return err @@ -719,19 +747,19 @@ func (t *ChannelState) UnmarshalCBOR(r io.Reader) error { case cbg.MajNegativeInt: extraI = int64(extra) if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") + return fmt.Errorf("int64 negative overflow") } extraI = -1 - extraI default: return fmt.Errorf("wrong type for int64 field: %d", maj) } - t.ReceivedBlocksTotal = int64(extraI) + t.SentBlocksTotal = int64(extraI) } // t.QueuedBlocksTotal (int64) (int64) case "QueuedBlocksTotal": { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + maj, extra, err := cr.ReadHeader() var extraI int64 if err != nil { return err @@ -745,7 +773,7 @@ func (t *ChannelState) UnmarshalCBOR(r io.Reader) error { case cbg.MajNegativeInt: extraI = int64(extra) if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") + return fmt.Errorf("int64 negative overflow") } extraI = -1 - extraI default: @@ -754,10 +782,10 @@ func (t *ChannelState) UnmarshalCBOR(r io.Reader) error { t.QueuedBlocksTotal = int64(extraI) } - // t.SentBlocksTotal (int64) (int64) - case "SentBlocksTotal": + // t.ReceivedBlocksTotal (int64) (int64) + case "ReceivedBlocksTotal": { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + maj, extra, err := cr.ReadHeader() var extraI int64 if err != nil { return err @@ -771,34 +799,14 @@ func (t *ChannelState) UnmarshalCBOR(r io.Reader) error { case cbg.MajNegativeInt: extraI = int64(extra) if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") + return fmt.Errorf("int64 negative overflow") } extraI = -1 - extraI default: return fmt.Errorf("wrong type for int64 field: %d", maj) } - t.SentBlocksTotal = int64(extraI) - } - // t.Stages (datatransfer.ChannelStages) (struct) - case "Stages": - - { - - b, err := br.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := br.UnreadByte(); err != nil { - return err - } - 
t.Stages = new(datatransfer.ChannelStages) - if err := t.Stages.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Stages pointer: %w", err) - } - } - + t.ReceivedBlocksTotal = int64(extraI) } default: @@ -814,21 +822,22 @@ func (t *EncodedVoucher) MarshalCBOR(w io.Writer) error { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{162}); err != nil { + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { return err } - scratch := make([]byte, 9) - // t.Type (datatransfer.TypeIdentifier) (string) if len("Type") > cbg.MaxLength { return xerrors.Errorf("Value in field \"Type\" was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Type"))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Type"))); err != nil { return err } - if _, err := io.WriteString(w, string("Type")); err != nil { + if _, err := cw.WriteString(string("Type")); err != nil { return err } @@ -836,10 +845,10 @@ func (t *EncodedVoucher) MarshalCBOR(w io.Writer) error { return xerrors.Errorf("Value in field t.Type was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Type))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Type))); err != nil { return err } - if _, err := io.WriteString(w, string(t.Type)); err != nil { + if _, err := cw.WriteString(string(t.Type)); err != nil { return err } @@ -848,29 +857,34 @@ func (t *EncodedVoucher) MarshalCBOR(w io.Writer) error { return xerrors.Errorf("Value in field \"Voucher\" was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Voucher"))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Voucher"))); err != nil { return err } - if _, err := io.WriteString(w, string("Voucher")); err != nil { + if _, err := cw.WriteString(string("Voucher")); err != nil { return err } - if err := t.Voucher.MarshalCBOR(w); err != nil { + if err := t.Voucher.MarshalCBOR(cw); err != nil { return err } return nil } -func (t *EncodedVoucher) UnmarshalCBOR(r io.Reader) error { +func (t *EncodedVoucher) UnmarshalCBOR(r io.Reader) (err error) { *t = EncodedVoucher{} - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) + cr := cbg.NewCborReader(r) - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + maj, extra, err := cr.ReadHeader() if err != nil { return err } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + if maj != cbg.MajMap { return fmt.Errorf("cbor input should be of type map") } @@ -885,7 +899,7 @@ func (t *EncodedVoucher) UnmarshalCBOR(r io.Reader) error { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadStringBuf(br, scratch) + sval, err := cbg.ReadString(cr) if err != nil { return err } @@ -898,7 +912,7 @@ func (t *EncodedVoucher) UnmarshalCBOR(r io.Reader) error { case "Type": { - sval, err := cbg.ReadStringBuf(br, scratch) + sval, err := cbg.ReadString(cr) if err != nil { return err } @@ -912,7 +926,7 @@ func (t *EncodedVoucher) UnmarshalCBOR(r io.Reader) error { t.Voucher = new(cbg.Deferred) - if err := t.Voucher.UnmarshalCBOR(br); err != nil { + if err := t.Voucher.UnmarshalCBOR(cr); err != nil { return xerrors.Errorf("failed to read deferred field: %w", err) } } @@ -930,21 +944,22 @@ func (t *EncodedVoucherResult) MarshalCBOR(w io.Writer) error { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{162}); err != nil { + + cw := 
cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { return err } - scratch := make([]byte, 9) - // t.Type (datatransfer.TypeIdentifier) (string) if len("Type") > cbg.MaxLength { return xerrors.Errorf("Value in field \"Type\" was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Type"))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Type"))); err != nil { return err } - if _, err := io.WriteString(w, string("Type")); err != nil { + if _, err := cw.WriteString(string("Type")); err != nil { return err } @@ -952,10 +967,10 @@ func (t *EncodedVoucherResult) MarshalCBOR(w io.Writer) error { return xerrors.Errorf("Value in field t.Type was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Type))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Type))); err != nil { return err } - if _, err := io.WriteString(w, string(t.Type)); err != nil { + if _, err := cw.WriteString(string(t.Type)); err != nil { return err } @@ -964,29 +979,34 @@ func (t *EncodedVoucherResult) MarshalCBOR(w io.Writer) error { return xerrors.Errorf("Value in field \"VoucherResult\" was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("VoucherResult"))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("VoucherResult"))); err != nil { return err } - if _, err := io.WriteString(w, string("VoucherResult")); err != nil { + if _, err := cw.WriteString(string("VoucherResult")); err != nil { return err } - if err := t.VoucherResult.MarshalCBOR(w); err != nil { + if err := t.VoucherResult.MarshalCBOR(cw); err != nil { return err } return nil } -func (t *EncodedVoucherResult) UnmarshalCBOR(r io.Reader) error { +func (t *EncodedVoucherResult) UnmarshalCBOR(r io.Reader) (err error) { *t = EncodedVoucherResult{} - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) + cr := cbg.NewCborReader(r) - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + maj, extra, err := cr.ReadHeader() if err != nil { return err } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + if maj != cbg.MajMap { return fmt.Errorf("cbor input should be of type map") } @@ -1001,7 +1021,7 @@ func (t *EncodedVoucherResult) UnmarshalCBOR(r io.Reader) error { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadStringBuf(br, scratch) + sval, err := cbg.ReadString(cr) if err != nil { return err } @@ -1014,7 +1034,7 @@ func (t *EncodedVoucherResult) UnmarshalCBOR(r io.Reader) error { case "Type": { - sval, err := cbg.ReadStringBuf(br, scratch) + sval, err := cbg.ReadString(cr) if err != nil { return err } @@ -1028,7 +1048,7 @@ func (t *EncodedVoucherResult) UnmarshalCBOR(r io.Reader) error { t.VoucherResult = new(cbg.Deferred) - if err := t.VoucherResult.UnmarshalCBOR(br); err != nil { + if err := t.VoucherResult.UnmarshalCBOR(cr); err != nil { return xerrors.Errorf("failed to read deferred field: %w", err) } } diff --git a/go.mod b/go.mod index f4514f529..55246a2f9 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,6 @@ require ( github.com/filecoin-project/go-bitfield v0.2.4 github.com/filecoin-project/go-cbor-util v0.0.1 github.com/filecoin-project/go-commp-utils v0.1.4 - github.com/filecoin-project/go-data-transfer v1.15.4-boost github.com/filecoin-project/go-fil-commcid v0.1.0 github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 github.com/filecoin-project/go-jsonrpc v0.3.1 @@ -341,6 
+340,7 @@ require ( require ( github.com/Jorropo/jsync v1.0.1 // indirect + github.com/filecoin-project/go-data-transfer v1.15.4-boost // indirect github.com/filecoin-project/go-data-transfer/v2 v2.0.0-rc7 // indirect github.com/filecoin-project/kubo-api-client v0.0.2-0.20230829103503-14448166d14d // indirect github.com/gammazero/channelqueue v0.2.1 // indirect diff --git a/node/builder.go b/node/builder.go index b180a12a6..b54b52c98 100644 --- a/node/builder.go +++ b/node/builder.go @@ -535,6 +535,7 @@ func ConfigBoost(cfg *config.Boost) Option { })), // Lotus Markets + Override(new(dtypes.ProviderTransport), modules.NewProviderTransport), Override(new(dtypes.ProviderTransferNetwork), modules.NewProviderTransferNetwork), Override(new(server.ProviderDataTransfer), server.NewProviderDataTransfer), Override(new(server.RetrievalAskGetter), server.NewRetrievalAskGetter), @@ -561,8 +562,6 @@ func ConfigBoost(cfg *config.Boost) Option { Override(new(provider.Interface), modules.IndexProvider(cfg.IndexProvider)), // Lotus Markets (storage) - Override(new(dtypes.ProviderTransport), modules.NewProviderTransport), - Override(new(fsm.Group), modules.NewLegacyDealsFSM(cfg)), Override(HandleBoostDealsKey, modules.HandleBoostLibp2pDeals(cfg)), Override(HandleContractDealsKey, modules.HandleContractDeals(&cfg.ContractDeals)), From 9c198ffb9a48f731a1343d8c0270a934d75ad5b0 Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Tue, 31 Oct 2023 18:37:07 +0400 Subject: [PATCH 14/34] cleanup go-data-transfer --- api/docgen/docgen.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/docgen/docgen.go b/api/docgen/docgen.go index 5399ad37c..944419058 100644 --- a/api/docgen/docgen.go +++ b/api/docgen/docgen.go @@ -12,10 +12,10 @@ import ( "time" "unicode" + "github.com/filecoin-project/boost/datatransfer" "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" - datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/filecoin-project/go-jsonrpc/auth" "github.com/google/uuid" "github.com/ipfs/go-cid" From 95b1b15c07023ce1ad711e55b4633b5cbe54d90b Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Wed, 1 Nov 2023 14:18:28 +0400 Subject: [PATCH 15/34] fix graphsync retrievals --- itests/dummydeal_offline_test.go | 1 - itests/framework/framework.go | 86 ++++++++++++++------------ node/builder.go | 29 +-------- retrievalmarket/server/datatransfer.go | 1 + 4 files changed, 48 insertions(+), 69 deletions(-) diff --git a/itests/dummydeal_offline_test.go b/itests/dummydeal_offline_test.go index 32df3219d..296d02600 100644 --- a/itests/dummydeal_offline_test.go +++ b/itests/dummydeal_offline_test.go @@ -46,7 +46,6 @@ func TestDummydealOffline(t *testing.T) { err = f.WaitForDealAddedToSector(offlineDealUuid) require.NoError(t, err) - // rootCid is an identity CID outFile := f.Retrieve(ctx, t, tempdir, rootCid, dealRes.DealParams.ClientDealProposal.Proposal.PieceCID, true, nil) kit.AssertFilesEqual(t, randomFilepath, outFile) } diff --git a/itests/framework/framework.go b/itests/framework/framework.go index a91ea1d43..c90f4752d 100644 --- a/itests/framework/framework.go +++ b/itests/framework/framework.go @@ -844,61 +844,65 @@ func (f *TestFramework) Retrieve(ctx context.Context, t *testing.T, tempdir stri dservOffline := dag.NewDAGService(blockservice.New(bstore, offline.Exchange(bstore))) // if we used a selector - need to find the sub-root the user actually wanted to retrieve - if 
!selectorNode.IsNull() { - var subRootFound bool - err := utils.TraverseDag( - ctx, - dservOffline, - root, - selectorNode, - func(p traversal.Progress, n ipld.Node, r traversal.VisitReason) error { - if r == traversal.VisitReason_SelectionMatch { - - if p.LastBlock.Path.String() != p.Path.String() { - return xerrors.Errorf("unsupported selection path '%s' does not correspond to a node boundary (a.k.a. CID link)", p.Path.String()) + if selectorNode != nil { + if !selectorNode.IsNull() { + var subRootFound bool + err := utils.TraverseDag( + ctx, + dservOffline, + root, + selectorNode, + func(p traversal.Progress, n ipld.Node, r traversal.VisitReason) error { + if r == traversal.VisitReason_SelectionMatch { + + if p.LastBlock.Path.String() != p.Path.String() { + return xerrors.Errorf("unsupported selection path '%s' does not correspond to a node boundary (a.k.a. CID link)", p.Path.String()) + } + + cidLnk, castOK := p.LastBlock.Link.(cidlink.Link) + if !castOK { + return xerrors.Errorf("cidlink cast unexpectedly failed on '%s'", p.LastBlock.Link.String()) + } + + root = cidLnk.Cid + subRootFound = true } - - cidLnk, castOK := p.LastBlock.Link.(cidlink.Link) - if !castOK { - return xerrors.Errorf("cidlink cast unexpectedly failed on '%s'", p.LastBlock.Link.String()) - } - - root = cidLnk.Cid - subRootFound = true - } - return nil - }, - ) - require.NoError(t, err) - require.True(t, subRootFound) + return nil + }, + ) + require.NoError(t, err) + require.True(t, subRootFound) + } } dnode, err := dservOffline.Get(ctx, root) require.NoError(t, err) var out string + retPath := path.Join(tempdir, "retrievals") + _ = os.Mkdir(retPath, 0755) if !extractCar { // Write file as car file - file, err1 := os.CreateTemp(path.Join(tempdir, "retrievals"), "*"+root.String()+".car") - require.NoError(t, err1) + file, err := os.CreateTemp(retPath, "*"+root.String()+".car") + require.NoError(t, err) out = file.Name() - err1 = car.WriteCar(ctx, dservOffline, []cid.Cid{root}, file) - require.NoError(t, err1) + err = car.WriteCar(ctx, dservOffline, []cid.Cid{root}, file) + require.NoError(t, err) } else { // Otherwise write file as UnixFS File - ufsFile, err1 := unixfile.NewUnixfsFile(ctx, dservOffline, dnode) - require.NoError(t, err1) - file, err1 := os.CreateTemp(path.Join(tempdir, "retrievals"), "*"+root.String()) - require.NoError(t, err1) - err1 = file.Close() - require.NoError(t, err1) - err1 = os.Remove(file.Name()) - require.NoError(t, err1) - err1 = files.WriteTo(ufsFile, file.Name()) - require.NoError(t, err1) - + ufsFile, err := unixfile.NewUnixfsFile(ctx, dservOffline, dnode) + require.NoError(t, err) + file, err := os.CreateTemp(retPath, "*"+root.String()) + require.NoError(t, err) + err = file.Close() + require.NoError(t, err) + err = os.Remove(file.Name()) + require.NoError(t, err) + err = files.WriteTo(ufsFile, file.Name()) + require.NoError(t, err) + out = file.Name() } return out diff --git a/node/builder.go b/node/builder.go index b54b52c98..9dc155c13 100644 --- a/node/builder.go +++ b/node/builder.go @@ -131,47 +131,22 @@ const ( StartListeningKey BootstrapKey - // filecoin - SetGenesisKey - - RunHelloKey - RunChainExchangeKey - RunChainGraphsync - RunPeerMgrKey - - HandleIncomingBlocksKey - HandleIncomingMessagesKey - HandleMigrateClientFundsKey - HandlePaymentChannelManagerKey - // miner - GetParamsKey + StartProviderDataTransferKey StartPieceDoctorKey - HandleMigrateProviderFundsKey - HandleDealsKey HandleCreateRetrievalTablesKey - HandleSetShardSelector - HandleSetRetrievalAskGetter 
HandleRetrievalEventsKey - HandleRetrievalKey HandleRetrievalAskKey HandleRetrievalTransportsKey HandleProtocolProxyKey - RunSectorServiceKey // boost should be started after legacy markets (HandleDealsKey) HandleBoostDealsKey HandleContractDealsKey HandleProposalLogCleanerKey - HandleOnlineBackupMgrKey // daemon ExtractApiKey - HeadMetricsKey - SettlePaymentChannelsKey - RunPeerTaggerKey - SetupFallbackBlockstoresKey - HandleSetLinkSystem SetApiEndpointKey @@ -537,7 +512,7 @@ func ConfigBoost(cfg *config.Boost) Option { // Lotus Markets Override(new(dtypes.ProviderTransport), modules.NewProviderTransport), Override(new(dtypes.ProviderTransferNetwork), modules.NewProviderTransferNetwork), - Override(new(server.ProviderDataTransfer), server.NewProviderDataTransfer), + Override(StartProviderDataTransferKey, server.NewProviderDataTransfer), Override(new(server.RetrievalAskGetter), server.NewRetrievalAskGetter), Override(new(*server.GraphsyncUnpaidRetrieval), modules.RetrievalGraphsync(cfg.LotusDealmaking.SimultaneousTransfersForStorage, cfg.LotusDealmaking.SimultaneousTransfersForStoragePerClient, cfg.LotusDealmaking.SimultaneousTransfersForRetrieval)), Override(new(dtypes.StagingGraphsync), From(new(*server.GraphsyncUnpaidRetrieval))), diff --git a/retrievalmarket/server/datatransfer.go b/retrievalmarket/server/datatransfer.go index ed7ca9f98..dc841edd5 100644 --- a/retrievalmarket/server/datatransfer.go +++ b/retrievalmarket/server/datatransfer.go @@ -31,6 +31,7 @@ func NewProviderDataTransfer(lc fx.Lifecycle, net dtypes.ProviderTransferNetwork lc.Append(fx.Hook{ OnStart: func(ctx context.Context) error { dt.SubscribeToEvents(marketevents.DataTransferLogger) + log.Infof("started provider data transfer") return dt.Start(ctx) }, OnStop: func(ctx context.Context) error { From 40fdb8193cd9b4a14cdc0105cc12efde2bdd6f21 Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Mon, 4 Dec 2023 15:50:29 +0400 Subject: [PATCH 16/34] use new client for GS tests --- go.sum | 2 + itests/framework/framework.go | 9 +- retrievalmarket/mock/gen.go | 4 - retrievalmarket/mock/piecestore.go | 152 ---- retrievalmarket/mock/retrievalmarket.go | 229 ------ .../server/gsunpaidretrieval_test.go | 711 ++++++++++-------- retrievalmarket/testutil/testutil.go | 100 +++ 7 files changed, 499 insertions(+), 708 deletions(-) delete mode 100644 retrievalmarket/mock/gen.go delete mode 100644 retrievalmarket/mock/piecestore.go delete mode 100644 retrievalmarket/mock/retrievalmarket.go create mode 100644 retrievalmarket/testutil/testutil.go diff --git a/go.sum b/go.sum index 2b9f602af..e3644eeaf 100644 --- a/go.sum +++ b/go.sum @@ -377,6 +377,7 @@ github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psS github.com/filecoin-project/go-state-types v0.1.6/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= github.com/filecoin-project/go-state-types v0.1.10/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= github.com/filecoin-project/go-state-types v0.11.1/go.mod h1:SyNPwTsU7I22gL2r0OAPcImvLoTVfgRwdK/Y5rR1zz8= +github.com/filecoin-project/go-state-types v0.11.2-0.20230712101859-8f37624fa540/go.mod h1:SyNPwTsU7I22gL2r0OAPcImvLoTVfgRwdK/Y5rR1zz8= github.com/filecoin-project/go-state-types v0.12.1 h1:/1ip/jXIP4QzWd3hlaQ7RGp1DHKKYG3+NOhd/r08UJY= github.com/filecoin-project/go-state-types v0.12.1/go.mod h1:KOBGyvCalT8uHBS7KSKOVbjsilD90bBZHgLAqrzz6gU= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= @@ -2496,6 +2497,7 @@ 
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= diff --git a/itests/framework/framework.go b/itests/framework/framework.go index c90f4752d..f7bb51525 100644 --- a/itests/framework/framework.go +++ b/itests/framework/framework.go @@ -83,7 +83,6 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" "golang.org/x/term" - "golang.org/x/xerrors" ) var Log = logging.Logger("boosttest") @@ -855,14 +854,10 @@ func (f *TestFramework) Retrieve(ctx context.Context, t *testing.T, tempdir stri func(p traversal.Progress, n ipld.Node, r traversal.VisitReason) error { if r == traversal.VisitReason_SelectionMatch { - if p.LastBlock.Path.String() != p.Path.String() { - return xerrors.Errorf("unsupported selection path '%s' does not correspond to a node boundary (a.k.a. CID link)", p.Path.String()) - } + require.Equal(t, p.LastBlock.Path.String(), p.Path.String()) cidLnk, castOK := p.LastBlock.Link.(cidlink.Link) - if !castOK { - return xerrors.Errorf("cidlink cast unexpectedly failed on '%s'", p.LastBlock.Link.String()) - } + require.True(t, castOK) root = cidLnk.Cid subRootFound = true diff --git a/retrievalmarket/mock/gen.go b/retrievalmarket/mock/gen.go deleted file mode 100644 index f99d4ba80..000000000 --- a/retrievalmarket/mock/gen.go +++ /dev/null @@ -1,4 +0,0 @@ -package mock - -//go:generate go run github.com/golang/mock/mockgen -destination=./piecestore.go -package=mock github.com/filecoin-project/boost/markets/piecestore PieceStore -//go:generate go run github.com/golang/mock/mockgen -destination=./retrievalmarket.go -package=mock github.com/filecoin-project/Boost/retrievalmarket/legacyretrievaltypes RetrievalProvider,SectorAccessor diff --git a/retrievalmarket/mock/piecestore.go b/retrievalmarket/mock/piecestore.go deleted file mode 100644 index 4ad67e41e..000000000 --- a/retrievalmarket/mock/piecestore.go +++ /dev/null @@ -1,152 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/filecoin-project/boost/markets/piecestore (interfaces: PieceStore) - -// Package mock is a generated GoMock package. -package mock - -import ( - context "context" - reflect "reflect" - - piecestore "github.com/filecoin-project/boost/markets/piecestore" - shared "github.com/filecoin-project/boost/markets/shared" - gomock "github.com/golang/mock/gomock" - cid "github.com/ipfs/go-cid" -) - -// MockPieceStore is a mock of PieceStore interface. -type MockPieceStore struct { - ctrl *gomock.Controller - recorder *MockPieceStoreMockRecorder -} - -// MockPieceStoreMockRecorder is the mock recorder for MockPieceStore. -type MockPieceStoreMockRecorder struct { - mock *MockPieceStore -} - -// NewMockPieceStore creates a new mock instance. 
-func NewMockPieceStore(ctrl *gomock.Controller) *MockPieceStore { - mock := &MockPieceStore{ctrl: ctrl} - mock.recorder = &MockPieceStoreMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockPieceStore) EXPECT() *MockPieceStoreMockRecorder { - return m.recorder -} - -// AddDealForPiece mocks base method. -func (m *MockPieceStore) AddDealForPiece(arg0, arg1 cid.Cid, arg2 piecestore.DealInfo) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddDealForPiece", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// AddDealForPiece indicates an expected call of AddDealForPiece. -func (mr *MockPieceStoreMockRecorder) AddDealForPiece(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddDealForPiece", reflect.TypeOf((*MockPieceStore)(nil).AddDealForPiece), arg0, arg1, arg2) -} - -// AddPieceBlockLocations mocks base method. -func (m *MockPieceStore) AddPieceBlockLocations(arg0 cid.Cid, arg1 map[cid.Cid]piecestore.BlockLocation) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddPieceBlockLocations", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// AddPieceBlockLocations indicates an expected call of AddPieceBlockLocations. -func (mr *MockPieceStoreMockRecorder) AddPieceBlockLocations(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddPieceBlockLocations", reflect.TypeOf((*MockPieceStore)(nil).AddPieceBlockLocations), arg0, arg1) -} - -// GetCIDInfo mocks base method. -func (m *MockPieceStore) GetCIDInfo(arg0 cid.Cid) (piecestore.CIDInfo, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCIDInfo", arg0) - ret0, _ := ret[0].(piecestore.CIDInfo) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetCIDInfo indicates an expected call of GetCIDInfo. -func (mr *MockPieceStoreMockRecorder) GetCIDInfo(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCIDInfo", reflect.TypeOf((*MockPieceStore)(nil).GetCIDInfo), arg0) -} - -// GetPieceInfo mocks base method. -func (m *MockPieceStore) GetPieceInfo(arg0 cid.Cid) (piecestore.PieceInfo, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPieceInfo", arg0) - ret0, _ := ret[0].(piecestore.PieceInfo) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPieceInfo indicates an expected call of GetPieceInfo. -func (mr *MockPieceStoreMockRecorder) GetPieceInfo(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPieceInfo", reflect.TypeOf((*MockPieceStore)(nil).GetPieceInfo), arg0) -} - -// ListCidInfoKeys mocks base method. -func (m *MockPieceStore) ListCidInfoKeys() ([]cid.Cid, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListCidInfoKeys") - ret0, _ := ret[0].([]cid.Cid) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListCidInfoKeys indicates an expected call of ListCidInfoKeys. -func (mr *MockPieceStoreMockRecorder) ListCidInfoKeys() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListCidInfoKeys", reflect.TypeOf((*MockPieceStore)(nil).ListCidInfoKeys)) -} - -// ListPieceInfoKeys mocks base method. 
-func (m *MockPieceStore) ListPieceInfoKeys() ([]cid.Cid, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListPieceInfoKeys") - ret0, _ := ret[0].([]cid.Cid) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListPieceInfoKeys indicates an expected call of ListPieceInfoKeys. -func (mr *MockPieceStoreMockRecorder) ListPieceInfoKeys() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPieceInfoKeys", reflect.TypeOf((*MockPieceStore)(nil).ListPieceInfoKeys)) -} - -// OnReady mocks base method. -func (m *MockPieceStore) OnReady(arg0 shared.ReadyFunc) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "OnReady", arg0) -} - -// OnReady indicates an expected call of OnReady. -func (mr *MockPieceStoreMockRecorder) OnReady(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnReady", reflect.TypeOf((*MockPieceStore)(nil).OnReady), arg0) -} - -// Start mocks base method. -func (m *MockPieceStore) Start(arg0 context.Context) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Start", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Start indicates an expected call of Start. -func (mr *MockPieceStoreMockRecorder) Start(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockPieceStore)(nil).Start), arg0) -} diff --git a/retrievalmarket/mock/retrievalmarket.go b/retrievalmarket/mock/retrievalmarket.go deleted file mode 100644 index 9e4c9d075..000000000 --- a/retrievalmarket/mock/retrievalmarket.go +++ /dev/null @@ -1,229 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/filecoin-project/go-fil-markets/retrievalmarket (interfaces: RetrievalProvider,SectorAccessor) - -// Package mock is a generated GoMock package. -package mock - -import ( - context "context" - io "io" - reflect "reflect" - - retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket" - shared "github.com/filecoin-project/go-fil-markets/shared" - abi "github.com/filecoin-project/go-state-types/abi" - gomock "github.com/golang/mock/gomock" -) - -// MockRetrievalProvider is a mock of RetrievalProvider interface. -type MockRetrievalProvider struct { - ctrl *gomock.Controller - recorder *MockRetrievalProviderMockRecorder -} - -// MockRetrievalProviderMockRecorder is the mock recorder for MockRetrievalProvider. -type MockRetrievalProviderMockRecorder struct { - mock *MockRetrievalProvider -} - -// NewMockRetrievalProvider creates a new mock instance. -func NewMockRetrievalProvider(ctrl *gomock.Controller) *MockRetrievalProvider { - mock := &MockRetrievalProvider{ctrl: ctrl} - mock.recorder = &MockRetrievalProviderMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockRetrievalProvider) EXPECT() *MockRetrievalProviderMockRecorder { - return m.recorder -} - -// GetAsk mocks base method. -func (m *MockRetrievalProvider) GetAsk() *retrievalmarket.Ask { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAsk") - ret0, _ := ret[0].(*retrievalmarket.Ask) - return ret0 -} - -// GetAsk indicates an expected call of GetAsk. -func (mr *MockRetrievalProviderMockRecorder) GetAsk() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAsk", reflect.TypeOf((*MockRetrievalProvider)(nil).GetAsk)) -} - -// GetDynamicAsk mocks base method. 
-func (m *MockRetrievalProvider) GetDynamicAsk(arg0 context.Context, arg1 retrievalmarket.PricingInput, arg2 []abi.DealID) (retrievalmarket.Ask, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDynamicAsk", arg0, arg1, arg2) - ret0, _ := ret[0].(retrievalmarket.Ask) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetDynamicAsk indicates an expected call of GetDynamicAsk. -func (mr *MockRetrievalProviderMockRecorder) GetDynamicAsk(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDynamicAsk", reflect.TypeOf((*MockRetrievalProvider)(nil).GetDynamicAsk), arg0, arg1, arg2) -} - -// ListDeals mocks base method. -func (m *MockRetrievalProvider) ListDeals() map[retrievalmarket.ProviderDealIdentifier]retrievalmarket.ProviderDealState { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListDeals") - ret0, _ := ret[0].(map[retrievalmarket.ProviderDealIdentifier]retrievalmarket.ProviderDealState) - return ret0 -} - -// ListDeals indicates an expected call of ListDeals. -func (mr *MockRetrievalProviderMockRecorder) ListDeals() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListDeals", reflect.TypeOf((*MockRetrievalProvider)(nil).ListDeals)) -} - -// OnReady mocks base method. -func (m *MockRetrievalProvider) OnReady(arg0 shared.ReadyFunc) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "OnReady", arg0) -} - -// OnReady indicates an expected call of OnReady. -func (mr *MockRetrievalProviderMockRecorder) OnReady(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnReady", reflect.TypeOf((*MockRetrievalProvider)(nil).OnReady), arg0) -} - -// SetAsk mocks base method. -func (m *MockRetrievalProvider) SetAsk(arg0 *retrievalmarket.Ask) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetAsk", arg0) -} - -// SetAsk indicates an expected call of SetAsk. -func (mr *MockRetrievalProviderMockRecorder) SetAsk(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetAsk", reflect.TypeOf((*MockRetrievalProvider)(nil).SetAsk), arg0) -} - -// Start mocks base method. -func (m *MockRetrievalProvider) Start(arg0 context.Context) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Start", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Start indicates an expected call of Start. -func (mr *MockRetrievalProviderMockRecorder) Start(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockRetrievalProvider)(nil).Start), arg0) -} - -// Stop mocks base method. -func (m *MockRetrievalProvider) Stop() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Stop") - ret0, _ := ret[0].(error) - return ret0 -} - -// Stop indicates an expected call of Stop. -func (mr *MockRetrievalProviderMockRecorder) Stop() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockRetrievalProvider)(nil).Stop)) -} - -// SubscribeToEvents mocks base method. -func (m *MockRetrievalProvider) SubscribeToEvents(arg0 retrievalmarket.ProviderSubscriber) retrievalmarket.Unsubscribe { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SubscribeToEvents", arg0) - ret0, _ := ret[0].(retrievalmarket.Unsubscribe) - return ret0 -} - -// SubscribeToEvents indicates an expected call of SubscribeToEvents. 
-func (mr *MockRetrievalProviderMockRecorder) SubscribeToEvents(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribeToEvents", reflect.TypeOf((*MockRetrievalProvider)(nil).SubscribeToEvents), arg0) -} - -// SubscribeToQueryEvents mocks base method. -func (m *MockRetrievalProvider) SubscribeToQueryEvents(arg0 retrievalmarket.ProviderQueryEventSubscriber) retrievalmarket.Unsubscribe { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SubscribeToQueryEvents", arg0) - ret0, _ := ret[0].(retrievalmarket.Unsubscribe) - return ret0 -} - -// SubscribeToQueryEvents indicates an expected call of SubscribeToQueryEvents. -func (mr *MockRetrievalProviderMockRecorder) SubscribeToQueryEvents(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribeToQueryEvents", reflect.TypeOf((*MockRetrievalProvider)(nil).SubscribeToQueryEvents), arg0) -} - -// SubscribeToValidationEvents mocks base method. -func (m *MockRetrievalProvider) SubscribeToValidationEvents(arg0 retrievalmarket.ProviderValidationSubscriber) retrievalmarket.Unsubscribe { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SubscribeToValidationEvents", arg0) - ret0, _ := ret[0].(retrievalmarket.Unsubscribe) - return ret0 -} - -// SubscribeToValidationEvents indicates an expected call of SubscribeToValidationEvents. -func (mr *MockRetrievalProviderMockRecorder) SubscribeToValidationEvents(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribeToValidationEvents", reflect.TypeOf((*MockRetrievalProvider)(nil).SubscribeToValidationEvents), arg0) -} - -// MockSectorAccessor is a mock of SectorAccessor interface. -type MockSectorAccessor struct { - ctrl *gomock.Controller - recorder *MockSectorAccessorMockRecorder -} - -// MockSectorAccessorMockRecorder is the mock recorder for MockSectorAccessor. -type MockSectorAccessorMockRecorder struct { - mock *MockSectorAccessor -} - -// NewMockSectorAccessor creates a new mock instance. -func NewMockSectorAccessor(ctrl *gomock.Controller) *MockSectorAccessor { - mock := &MockSectorAccessor{ctrl: ctrl} - mock.recorder = &MockSectorAccessorMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockSectorAccessor) EXPECT() *MockSectorAccessorMockRecorder { - return m.recorder -} - -// IsUnsealed mocks base method. -func (m *MockSectorAccessor) IsUnsealed(arg0 context.Context, arg1 abi.SectorNumber, arg2, arg3 abi.UnpaddedPieceSize) (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IsUnsealed", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// IsUnsealed indicates an expected call of IsUnsealed. -func (mr *MockSectorAccessorMockRecorder) IsUnsealed(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsUnsealed", reflect.TypeOf((*MockSectorAccessor)(nil).IsUnsealed), arg0, arg1, arg2, arg3) -} - -// UnsealSector mocks base method. 
-func (m *MockSectorAccessor) UnsealSector(arg0 context.Context, arg1 abi.SectorNumber, arg2, arg3 abi.UnpaddedPieceSize) (io.ReadCloser, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UnsealSector", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(io.ReadCloser) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UnsealSector indicates an expected call of UnsealSector. -func (mr *MockSectorAccessorMockRecorder) UnsealSector(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnsealSector", reflect.TypeOf((*MockSectorAccessor)(nil).UnsealSector), arg0, arg1, arg2, arg3) -} diff --git a/retrievalmarket/server/gsunpaidretrieval_test.go b/retrievalmarket/server/gsunpaidretrieval_test.go index 36f8ee4c4..45b1d421c 100644 --- a/retrievalmarket/server/gsunpaidretrieval_test.go +++ b/retrievalmarket/server/gsunpaidretrieval_test.go @@ -1,332 +1,411 @@ package server import ( + "context" + "errors" + "fmt" + "io" + "os" + "strings" "testing" + "time" + "github.com/dustin/go-humanize" + graphsyncimpl "github.com/filecoin-project/boost-graphsync/impl" + gsnet "github.com/filecoin-project/boost-graphsync/network" + "github.com/filecoin-project/boost-graphsync/storeutil" + clinode "github.com/filecoin-project/boost/cli/node" + "github.com/filecoin-project/boost/datatransfer" + dtgstransport "github.com/filecoin-project/boost/datatransfer/transport/graphsync" + "github.com/filecoin-project/boost/markets/utils" + "github.com/filecoin-project/boost/piecedirectory" + gsclient "github.com/filecoin-project/boost/retrievalmarket/client" + "github.com/filecoin-project/boost/retrievalmarket/testutil" + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" + bdclientutil "github.com/filecoin-project/boostd-data/clientutil" + "github.com/filecoin-project/boostd-data/model" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" + lotusmocks "github.com/filecoin-project/lotus/api/mocks" + test "github.com/filecoin-project/lotus/chain/events/state/mock" + "github.com/golang/mock/gomock" + "github.com/google/uuid" + "github.com/ipfs/boxo/blockservice" + "github.com/ipfs/boxo/exchange/offline" + "github.com/ipfs/boxo/ipld/merkledag" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + blocksutil "github.com/ipfs/go-ipfs-blocksutil" logging "github.com/ipfs/go-log/v2" + "github.com/ipld/go-car/v2" + "github.com/ipld/go-ipld-prime" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/traversal" + "github.com/ipld/go-ipld-prime/traversal/selector" + "github.com/ipld/go-ipld-prime/traversal/selector/builder" + "github.com/stretchr/testify/require" + "golang.org/x/term" ) -//var tlog = logging.Logger("testgs") +var tlog = logging.Logger("testgs") -//type testCase struct { -// name string -// reqPayloadCid cid.Cid -// watch func(client legacyretrievaltypes.RetrievalClient, gsupr *GraphsyncUnpaidRetrieval) -// ask *legacyretrievaltypes.Ask -// noUnsealedCopy bool -// expectErr bool -// expectClientCancelEvent bool -// expectProviderCancelEvent bool -// expectRejection string -//} +type testCase struct { + name string + reqPayloadCid cid.Cid + watch func(client *gsclient.Client, gsupr *GraphsyncUnpaidRetrieval) + ask *legacyretrievaltypes.Ask + noUnsealedCopy bool + expectErr bool + 
expectClientCancelEvent bool + expectProviderCancelEvent bool + expectRejection string +} -//var providerCancelled = errors.New("provider cancelled") -//var clientCancelled = errors.New("client cancelled") -//var clientRejected = errors.New("client received reject response") +var providerCancelled = errors.New("provider cancelled") +var clientCancelled = errors.New("client cancelled") +var clientRejected = errors.New("client received reject response") func TestGS(t *testing.T) { - t.Skip("refactor tests to use boost client") + //t.Skip("refactor tests to use boost client") //_ = logging.SetLogLevel("testgs", "debug") _ = logging.SetLogLevel("testgs", "info") - //_ = logging.SetLogLevel("dt-impl", "debug") - - //missingCid := cid.MustParse("baguqeeraaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") - // - //testCases := []testCase{{ - // name: "happy path", - //}, { - // name: "request missing payload cid", - // reqPayloadCid: missingCid, - // expectErr: true, - //}, { - // name: "request for piece with no unsealed sectors", - // noUnsealedCopy: true, - // expectErr: true, - // expectRejection: "no unsealed piece", - //}, { - // name: "request for non-zero price per byte", - // ask: &legacyretrievaltypes.Ask{ - // UnsealPrice: abi.NewTokenAmount(0), - // PricePerByte: abi.NewTokenAmount(1), - // }, - // expectErr: true, - // expectRejection: "ask price is non-zero", - //}, { - // // Note: we disregard the unseal price because we only serve deals - // // with an unsealed piece, so the unseal price is irrelevant. - // // Therefore the retrieval should succeed for non-zero unseal price. - // name: "request for non-zero unseal price", - // ask: &legacyretrievaltypes.Ask{ - // UnsealPrice: abi.NewTokenAmount(1), - // PricePerByte: abi.NewTokenAmount(0), - // }, - //}, { - // name: "cancel request after sending 2 blocks", - // watch: func(client legacyretrievaltypes.RetrievalClient, gsupr *GraphsyncUnpaidRetrieval) { - // count := 0 - // gsupr.outgoingBlockHook = func(state *retrievalState) { - // count++ - // if count == 2 { - // tlog.Debug("cancelling client deal") - // err := client.CancelDeal(state.mkts.ID) - // require.NoError(t, err) - // } - // if count == 10 { - // tlog.Warn("sending last block but client cancel hasn't arrived yet") - // } - // } - // }, - // expectClientCancelEvent: true, - // expectProviderCancelEvent: true, - //}, { - // name: "provider cancel request after sending 2 blocks", - // watch: func(client legacyretrievaltypes.RetrievalClient, gsupr *GraphsyncUnpaidRetrieval) { - // count := 0 - // gsupr.outgoingBlockHook = func(state *retrievalState) { - // count++ - // if count == 2 { - // tlog.Debug("provider cancelling client deal") - // err := gsupr.CancelTransfer(context.TODO(), state.cs.transferID, &state.cs.recipient) - // require.NoError(t, err) - // } - // if count == 10 { - // tlog.Warn("sending last block but client cancel hasn't arrived yet") - // } - // } - // }, - // expectErr: true, - // expectClientCancelEvent: true, - //}, { - // name: "provider cancel request after sending 2 blocks without peer id", - // watch: func(client legacyretrievaltypes.RetrievalClient, gsupr *GraphsyncUnpaidRetrieval) { - // count := 0 - // gsupr.outgoingBlockHook = func(state *retrievalState) { - // count++ - // if count == 2 { - // tlog.Debug("provider cancelling client deal") - // err := gsupr.CancelTransfer(context.TODO(), state.cs.transferID, nil) - // require.NoError(t, err) - // } - // if count == 10 { - // tlog.Warn("sending last block but client cancel hasn't arrived 
yet") - // } - // } - // }, - // expectErr: true, - // expectClientCancelEvent: true, - //}} - // - //for _, tc := range testCases { - // t.Run(tc.name, func(t *testing.T) { - // runRequestTest(t, tc) - // }) - //} + _ = logging.SetLogLevel("dt-impl", "debug") + + missingCid := cid.MustParse("baguqeeraaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") + + testCases := []testCase{{ + name: "happy path", + }, { + name: "request missing payload cid", + reqPayloadCid: missingCid, + expectErr: true, + }, { + name: "request for piece with no unsealed sectors", + noUnsealedCopy: true, + expectErr: true, + expectRejection: "no unsealed piece", + }, { + name: "request for non-zero price per byte", + ask: &legacyretrievaltypes.Ask{ + UnsealPrice: abi.NewTokenAmount(0), + PricePerByte: abi.NewTokenAmount(1), + }, + expectErr: true, + expectRejection: "ask price is non-zero", + }, { + // Note: we disregard the unseal price because we only serve deals + // with an unsealed piece, so the unseal price is irrelevant. + // Therefore the retrieval should succeed for non-zero unseal price. + name: "request for non-zero unseal price", + ask: &legacyretrievaltypes.Ask{ + UnsealPrice: abi.NewTokenAmount(1), + PricePerByte: abi.NewTokenAmount(0), + }, + }, { + name: "provider cancel request after sending 2 blocks", + watch: func(client *gsclient.Client, gsupr *GraphsyncUnpaidRetrieval) { + count := 0 + gsupr.outgoingBlockHook = func(state *retrievalState) { + count++ + if count == 2 { + tlog.Debug("provider cancelling client deal") + err := gsupr.CancelTransfer(context.TODO(), state.cs.transferID, &state.cs.recipient) + require.NoError(t, err) + } + if count == 10 { + tlog.Warn("sending last block but client cancel hasn't arrived yet") + } + } + }, + expectErr: true, + expectClientCancelEvent: true, + }, { + name: "provider cancel request after sending 2 blocks without peer id", + watch: func(client *gsclient.Client, gsupr *GraphsyncUnpaidRetrieval) { + count := 0 + gsupr.outgoingBlockHook = func(state *retrievalState) { + count++ + if count == 2 { + tlog.Debug("provider cancelling client deal") + err := gsupr.CancelTransfer(context.TODO(), state.cs.transferID, nil) + require.NoError(t, err) + } + if count == 10 { + tlog.Warn("sending last block but client cancel hasn't arrived yet") + } + } + }, + expectErr: true, + expectClientCancelEvent: true, + }} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + runRequestTest(t, tc) + }) + } +} + +func runRequestTest(t *testing.T, tc testCase) { + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + // Create a CAR file and set up mocks + testData := testutil.NewLibp2pTestData(ctx, t) + + carRootCid, carFilePath := piecedirectory.CreateCarFile(t) + carFile, err := os.Open(carFilePath) + require.NoError(t, err) + defer carFile.Close() + + // Create a random CAR file + carReader, err := car.OpenReader(carFilePath) + require.NoError(t, err) + defer carReader.Close() + carv1Reader, err := carReader.DataReader() + require.NoError(t, err) + + // Any calls to get a reader over data should return a reader over the random CAR file + pr := piecedirectory.CreateMockPieceReader(t, carv1Reader) + + carv1Bytes, err := io.ReadAll(carv1Reader) + require.NoError(t, err) + carSize := len(carv1Bytes) + + maddr := address.TestAddress + pieceCid := GenerateCids(1)[0] + sectorID := abi.SectorNumber(1) + offset := abi.PaddedPieceSize(0) + dealInfo := model.DealInfo{ + DealUuid: uuid.New().String(), + ChainDealID: 
abi.DealID(1), + MinerAddr: maddr, + SectorID: sectorID, + PieceOffset: offset, + PieceLength: abi.UnpaddedPieceSize(carSize).Padded(), + } + + cl := bdclientutil.NewTestStore(ctx) + defer cl.Close(ctx) + + pd := piecedirectory.NewPieceDirectory(cl, pr, 1) + pd.Start(ctx) + err = pd.AddDealForPiece(ctx, pieceCid, dealInfo) + require.NoError(t, err) + + sa := &mockSectorAccessor{ + unsealed: !tc.noUnsealedCopy, + } + vdeps := ValidationDeps{ + PieceDirectory: pd, + SectorAccessor: sa, + AskStore: NewRetrievalAskGetter(), + } + + // Create a blockstore over the CAR file blocks + carDataBs, err := pd.GetBlockstore(ctx, pieceCid) + require.NoError(t, err) + + // Wrap graphsync with the graphsync unpaid retrieval interceptor + linkSystem2 := storeutil.LinkSystemForBlockstore(carDataBs) + gs2 := graphsyncimpl.New(ctx, gsnet.NewFromLibp2pHost(testData.Host2), linkSystem2) + gsupr, err := NewGraphsyncUnpaidRetrieval(testData.Host2.ID(), gs2, testData.DTNet2, vdeps) + require.NoError(t, err) + + ctrl := gomock.NewController(t) + fn := lotusmocks.NewMockFullNode(ctrl) + peerID := testData.Host2.ID() + var maddrs []abi.Multiaddrs + for _, mma := range testData.Host2.Addrs() { + maddrs = append(maddrs, mma.Bytes()) + } + minfo := api.MinerInfo{ + PeerId: &peerID, + Multiaddrs: maddrs, + Worker: address.TestAddress2, + } + fn.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(minfo, nil).AnyTimes() + chainHead, err := test.MockTipset(maddr, 1) + require.NoError(t, err) + fn.EXPECT().ChainHead(gomock.Any()).Return(chainHead, nil).AnyTimes() + + queryHandler := NewQueryAskHandler(testData.Host2, maddr, pd, sa, NewRetrievalAskGetter(), fn) + queryHandler.Start() + defer queryHandler.Stop() + + // Create a Graphsync transport and call SetEventHandler, which registers + // listeners for all the Graphsync hooks. + gsTransport := dtgstransport.NewTransport(testData.Host2.ID(), gsupr) + err = gsTransport.SetEventHandler(nil) + require.NoError(t, err) + + gsupr.SubscribeToDataTransferEvents(func(event datatransfer.Event, channelState datatransfer.ChannelState) { + tlog.Debugf("prov dt: %s %s / %s", datatransfer.Events[event.Code], event.Message, datatransfer.Statuses[channelState.Status()]) + }) + err = gsupr.Start(ctx) + require.NoError(t, err) + + client := newTestClient(t, testData, fn) + + if tc.watch != nil { + tc.watch(client, gsupr) + } + + // Watch for provider completion + providerResChan := make(chan error) + gsupr.SubscribeToMarketsEvents(func(event legacyretrievaltypes.ProviderEvent, state legacyretrievaltypes.ProviderDealState) { + tlog.Debugf("prov mkt: %s %s %s", legacyretrievaltypes.ProviderEvents[event], state.Status.String(), state.Message) + switch event { + case legacyretrievaltypes.ProviderEventComplete: + providerResChan <- nil + case legacyretrievaltypes.ProviderEventCancelComplete: + providerResChan <- providerCancelled + case legacyretrievaltypes.ProviderEventDataTransferError: + providerResChan <- errors.New(state.Message) + } + }) + + // Retrieve the data + tlog.Infof("Retrieve cid %s from peer %s", carRootCid, client.ClientAddr.String()) + // Use an explore-all but add unixfs-preload to make sure we have UnixFS + // ADL support wired up. 
+ ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) + sel := ssb.ExploreInterpretAs("unixfs-preload", ssb.ExploreRecursive( + selector.RecursionLimitNone(), + ssb.ExploreAll(ssb.ExploreRecursiveEdge()), + )).Node() + + query, err := client.RetrievalQuery(ctx, maddr, pieceCid) + require.NoError(t, err) + + proposal, err := gsclient.RetrievalProposalForAsk(query, carRootCid, sel) + require.NoError(t, err) + + // Retrieve the data + _, err = client.RetrieveContentWithProgressCallback( + ctx, + maddr, + proposal, + func(bytesReceived_ uint64) { + printProgress(bytesReceived_) + }, + ) + require.NoError(t, err) + + dservOffline := merkledag.NewDAGService(blockservice.New(testData.Bs1, offline.Exchange(testData.Bs1))) + + // if we used a selector - need to find the sub-root the user actually wanted to retrieve + if sel != nil { + var subRootFound bool + err = utils.TraverseDag( + ctx, + dservOffline, + carRootCid, + sel, + func(p traversal.Progress, n ipld.Node, r traversal.VisitReason) error { + if r == traversal.VisitReason_SelectionMatch { + + require.Equal(t, p.LastBlock.Path.String(), p.Path.String()) + + cidLnk, castOK := p.LastBlock.Link.(cidlink.Link) + require.True(t, castOK) + + carRootCid = cidLnk.Cid + subRootFound = true + } + return nil + }, + ) + require.NoError(t, err) + + require.True(t, subRootFound) + } + + // Wait for provider completion + err = waitFor(ctx, t, providerResChan) + if tc.expectErr || tc.expectProviderCancelEvent { + require.Error(t, err) + if tc.expectProviderCancelEvent { + require.EqualError(t, err, providerCancelled.Error()) + } + } else { + require.NoError(t, err) + } + + //final verification -- the server has no active graphsync requests + stats := gsupr.GraphExchange.Stats() + require.Equal(t, stats.IncomingRequests.Active, uint64(0)) +} + +func newTestClient(t *testing.T, testData *testutil.Libp2pTestData, full api.FullNode) *gsclient.Client { + clientPath, err := os.MkdirTemp(t.TempDir(), "client") + require.NoError(t, err) + + clientNode, err := clinode.Setup(clientPath) + require.NoError(t, err) + clientNode.Host = testData.Host1 + //err = clientNode.Wallet.SetDefault(address.TestAddress2) + //require.NoError(t, err) + clientDs := namespace.Wrap(testData.Ds1, datastore.NewKey("/retrievals/client")) + addr, err := clientNode.Wallet.GetDefault() + require.NoError(t, err) + + // Create the retrieval client + fc, err := gsclient.NewClient(clientNode.Host, full, clientNode.Wallet, addr, testData.Bs1, clientDs, clientPath) + require.NoError(t, err) + return fc +} + +func waitFor(ctx context.Context, t *testing.T, resChan chan error) error { + var err error + select { + case <-ctx.Done(): + require.Fail(t, "test timed out") + case err = <-resChan: + } + return err +} + +type mockSectorAccessor struct { + unsealed bool +} + +func (m *mockSectorAccessor) IsUnsealed(ctx context.Context, minerAddr address.Address, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) { + return m.unsealed, nil +} + +func printProgress(bytesReceived uint64) { + str := fmt.Sprintf("%v (%v)", bytesReceived, humanize.IBytes(bytesReceived)) + + termWidth, _, err := term.GetSize(int(os.Stdin.Fd())) + strLen := len(str) + if err == nil { + + if strLen < termWidth { + // If the string is shorter than the terminal width, pad right side + // with spaces to remove old text + str = strings.Join([]string{str, strings.Repeat(" ", termWidth-strLen)}, "") + } else if strLen > termWidth { + // If the string doesn't fit in the terminal, 
cut it down to a size + // that fits + str = str[:termWidth] + } + } + + fmt.Fprintf(os.Stderr, "%s\r", str) +} + +// GenerateCids produces n content identifiers. +func GenerateCids(n int) []cid.Cid { + cids := make([]cid.Cid, 0, n) + for i := 0; i < n; i++ { + c := blockGenerator.Next().Cid() + cids = append(cids, c) + } + return cids } -//func runRequestTest(t *testing.T, tc testCase) { -// ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) -// defer cancel() -// -// // Create a CAR file and set up mocks -// testData := shared_tut.NewLibp2pTestData(ctx, t) -// -// carRootCid, carFilePath := piecedirectory.CreateCarFile(t) -// carFile, err := os.Open(carFilePath) -// require.NoError(t, err) -// defer carFile.Close() -// -// // Create a random CAR file -// carReader, err := car.OpenReader(carFilePath) -// require.NoError(t, err) -// defer carReader.Close() -// carv1Reader, err := carReader.DataReader() -// require.NoError(t, err) -// -// // Any calls to get a reader over data should return a reader over the random CAR file -// pr := piecedirectory.CreateMockPieceReader(t, carv1Reader) -// -// carv1Bytes, err := io.ReadAll(carv1Reader) -// require.NoError(t, err) -// carSize := len(carv1Bytes) -// -// maddr := address.TestAddress -// pieceCid := shared_tut.GenerateCids(1)[0] -// sectorID := abi.SectorNumber(1) -// offset := abi.PaddedPieceSize(0) -// dealInfo := model.DealInfo{ -// DealUuid: uuid.New().String(), -// ChainDealID: abi.DealID(1), -// MinerAddr: maddr, -// SectorID: sectorID, -// PieceOffset: offset, -// PieceLength: abi.UnpaddedPieceSize(carSize).Padded(), -// } -// -// cl := bdclientutil.NewTestStore(ctx) -// defer cl.Close(ctx) -// -// pd := piecedirectory.NewPieceDirectory(cl, pr, 1) -// pd.Start(ctx) -// err = pd.AddDealForPiece(ctx, pieceCid, dealInfo) -// require.NoError(t, err) -// -// vdeps := ValidationDeps{ -// PieceDirectory: pd, -// SectorAccessor: &mockSectorAccessor{ -// unsealed: !tc.noUnsealedCopy, -// }, -// AskStore: modules.NewRetrievalAskGetter(), -// } -// -// // Create a blockstore over the CAR file blocks -// carDataBs, err := pd.GetBlockstore(ctx, pieceCid) -// require.NoError(t, err) -// -// // Wrap graphsync with the graphsync unpaid retrieval interceptor -// linkSystem2 := storeutil.LinkSystemForBlockstore(carDataBs) -// gs2 := graphsyncimpl.New(ctx, network.NewFromLibp2pHost(testData.Host2), linkSystem2) -// gsupr, err := NewGraphsyncUnpaidRetrieval(testData.Host2.ID(), gs2, testData.DTNet2, vdeps) -// require.NoError(t, err) -// -// // Create a Graphsync transport and call SetEventHandler, which registers -// // listeners for all the Graphsync hooks. 
-// gsTransport := dtgstransport.NewTransport(testData.Host2.ID(), gsupr) -// err = gsTransport.SetEventHandler(nil) -// require.NoError(t, err) -// -// // Create the retrieval provider with the graphsync unpaid retrieval interceptor -// paymentAddress := address.TestAddress2 -// -// gsupr.SubscribeToDataTransferEvents(func(event datatransfer.Event, channelState datatransfer.ChannelState) { -// tlog.Debugf("prov dt: %s %s / %s", datatransfer.Events[event.Code], event.Message, datatransfer.Statuses[channelState.Status()]) -// }) -// err = gsupr.Start(ctx) -// require.NoError(t, err) -// -// // Create a retrieval client -// retrievalPeer := legacyretrievaltypes.RetrievalPeer{ -// Address: paymentAddress, -// ID: testData.Host2.ID(), -// } -// retrievalClientNode := testnodes.NewTestRetrievalClientNode(testnodes.TestRetrievalClientNodeParams{}) -// retrievalClientNode.ExpectKnownAddresses(retrievalPeer, nil) -// client := createRetrievalClient(ctx, t, testData, retrievalClientNode) -// shared_tut.StartAndWaitForReady(ctx, t, client) -// -// if tc.watch != nil { -// tc.watch(client, gsupr) -// } -// -// // Watch for provider completion -// providerResChan := make(chan error) -// gsupr.SubscribeToMarketsEvents(func(event legacyretrievaltypes.ProviderEvent, state legacyretrievaltypes.ProviderDealState) { -// tlog.Debugf("prov mkt: %s %s %s", legacyretrievaltypes.ProviderEvents[event], state.Status.String(), state.Message) -// switch event { -// case legacyretrievaltypes.ProviderEventComplete: -// providerResChan <- nil -// case legacyretrievaltypes.ProviderEventCancelComplete: -// providerResChan <- providerCancelled -// case legacyretrievaltypes.ProviderEventDataTransferError: -// providerResChan <- errors.New(state.Message) -// } -// }) -// -// // Watch for client completion -// clientResChan := make(chan error) -// client.SubscribeToEvents(func(event legacyretrievaltypes.ClientEvent, state legacyretrievaltypes.ClientDealState) { -// tlog.Debugf("clnt mkt: %s %s %s", event.String(), state.Status.String(), state.Message) -// switch event { -// case legacyretrievaltypes.ClientEventComplete: -// clientResChan <- nil -// case legacyretrievaltypes.ClientEventCancelComplete: -// clientResChan <- clientCancelled -// case legacyretrievaltypes.ClientEventDealRejected: -// clientResChan <- fmt.Errorf("%s :%w", state.Message, clientRejected) -// case legacyretrievaltypes.ClientEventDataTransferError: -// clientResChan <- errors.New(state.Message) -// } -// }) -// -// // Retrieve the data -// tlog.Infof("Retrieve cid %s from peer %s", carRootCid, retrievalPeer.ID) -// // Use an explore-all but add unixfs-preload to make sure we have UnixFS -// // ADL support wired up. 
-// ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) -// sel := ssb.ExploreInterpretAs("unixfs-preload", ssb.ExploreRecursive( -// selector.RecursionLimitNone(), -// ssb.ExploreAll(ssb.ExploreRecursiveEdge()), -// )).Node() -// params, err := legacyretrievaltypes.NewParamsV1(abi.NewTokenAmount(0), 0, 0, sel, nil, abi.NewTokenAmount(0)) -// require.NoError(t, err) -// if tc.reqPayloadCid != cid.Undef { -// carRootCid = tc.reqPayloadCid -// } -// _, err = client.Retrieve(ctx, 1, carRootCid, params, abi.NewTokenAmount(0), retrievalPeer, address.TestAddress, address.TestAddress2) -// require.NoError(t, err) -// -// // Wait for provider completion -// err = waitFor(ctx, t, providerResChan) -// if tc.expectErr || tc.expectProviderCancelEvent { -// require.Error(t, err) -// if tc.expectProviderCancelEvent { -// require.EqualError(t, err, providerCancelled.Error()) -// } -// } else { -// require.NoError(t, err) -// } -// -// // Wait for client completion -// err = waitFor(ctx, t, clientResChan) -// if tc.expectErr || tc.expectClientCancelEvent { -// require.Error(t, err) -// if tc.expectClientCancelEvent { -// require.EqualError(t, err, clientCancelled.Error()) -// } else if tc.expectRejection != "" { -// require.ErrorContains(t, err, tc.expectRejection) -// } -// } else { -// require.NoError(t, err) -// } -// -// // final verification -- the server has no active graphsync requests -// stats := gsupr.GraphExchange.Stats() -// require.Equal(t, stats.IncomingRequests.Active, uint64(0)) -//} -// -//func createRetrievalClient(ctx context.Context, t *testing.T, testData *shared_tut.Libp2pTestData, retrievalClientNode *testnodes.TestRetrievalClientNode) legacyretrievaltypes.RetrievalClient { -// nw1 := rmnet.NewFromLibp2pHost(testData.Host1, rmnet.RetryParameters(0, 0, 0, 0)) -// gs1 := graphsyncimpl.New(ctx, network.NewFromLibp2pHost(testData.Host1), testData.LinkSystem1) -// dtTransport1 := dtgstransport.NewTransport(testData.Host1.ID(), gs1) -// dt1, err := dtimpl.NewDataTransfer(testData.DTStore1, testData.DTNet1, dtTransport1) -// require.NoError(t, err) -// testutil.StartAndWaitForReady(ctx, t, dt1) -// require.NoError(t, err) -// clientDs := namespace.Wrap(testData.Ds1, datastore.NewKey("/retrievals/client")) -// ba := tut.NewTestRetrievalBlockstoreAccessor() -// client, err := retrievalimpl.NewClient(nw1, dt1, retrievalClientNode, &tut.TestPeerResolver{}, clientDs, ba) -// require.NoError(t, err) -// -// dt1.SubscribeToEvents(func(event datatransfer.Event, channelState datatransfer.ChannelState) { -// tlog.Debugf("client dt: %s %s / %s", datatransfer.Events[event.Code], event.Message, datatransfer.Statuses[channelState.Status()]) -// }) -// -// return client -//} -// -//func waitFor(ctx context.Context, t *testing.T, resChan chan error) error { -// var err error -// select { -// case <-ctx.Done(): -// require.Fail(t, "test timed out") -// case err = <-resChan: -// } -// return err -//} -// -//type mockSectorAccessor struct { -// unsealed bool -//} -// -//func (m *mockSectorAccessor) IsUnsealed(ctx context.Context, minerAddr address.Address, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) { -// return m.unsealed, nil -//} +var blockGenerator = blocksutil.NewBlockGenerator() diff --git a/retrievalmarket/testutil/testutil.go b/retrievalmarket/testutil/testutil.go new file mode 100644 index 000000000..0655216e7 --- /dev/null +++ b/retrievalmarket/testutil/testutil.go @@ -0,0 +1,100 @@ +package testutil + +import ( + "context" + 
"io/ioutil" + "os" + "testing" + + "github.com/filecoin-project/boost-graphsync/storeutil" + dtnet "github.com/filecoin-project/boost/datatransfer/network" + "github.com/ipfs/boxo/blockservice" + bstore "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/exchange/offline" + "github.com/ipfs/boxo/ipld/merkledag" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + dss "github.com/ipfs/go-datastore/sync" + ipldformat "github.com/ipfs/go-ipld-format" + "github.com/ipld/go-ipld-prime" + "github.com/libp2p/go-libp2p/core/host" + mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" + "github.com/stretchr/testify/require" +) + +type Libp2pTestData struct { + Ctx context.Context + Ds1 datastore.Batching + Ds2 datastore.Batching + Bs1 bstore.Blockstore + Bs2 bstore.Blockstore + DagService1 ipldformat.DAGService + DagService2 ipldformat.DAGService + DTNet1 dtnet.DataTransferNetwork + DTNet2 dtnet.DataTransferNetwork + DTStore1 datastore.Batching + DTStore2 datastore.Batching + DTTmpDir1 string + DTTmpDir2 string + LinkSystem1 ipld.LinkSystem + LinkSystem2 ipld.LinkSystem + Host1 host.Host + Host2 host.Host + OrigBytes []byte + + MockNet mocknet.Mocknet +} + +func NewLibp2pTestData(ctx context.Context, t *testing.T) *Libp2pTestData { + testData := &Libp2pTestData{} + testData.Ctx = ctx + + var err error + + testData.Ds1 = dss.MutexWrap(datastore.NewMapDatastore()) + testData.Ds2 = dss.MutexWrap(datastore.NewMapDatastore()) + + // make a bstore and dag service + testData.Bs1 = bstore.NewBlockstore(testData.Ds1) + testData.Bs2 = bstore.NewBlockstore(testData.Ds2) + + testData.DagService1 = merkledag.NewDAGService(blockservice.New(testData.Bs1, offline.Exchange(testData.Bs1))) + testData.DagService2 = merkledag.NewDAGService(blockservice.New(testData.Bs2, offline.Exchange(testData.Bs2))) + + // setup an IPLD link system for bstore 1 + testData.LinkSystem1 = storeutil.LinkSystemForBlockstore(testData.Bs1) + + // setup an IPLD link system for bstore 2 + testData.LinkSystem2 = storeutil.LinkSystemForBlockstore(testData.Bs2) + + mn := mocknet.New() + + // setup network + testData.Host1, err = mn.GenPeer() + require.NoError(t, err) + + testData.Host2, err = mn.GenPeer() + require.NoError(t, err) + + err = mn.LinkAll() + require.NoError(t, err) + + testData.DTNet1 = dtnet.NewFromLibp2pHost(testData.Host1) + testData.DTNet2 = dtnet.NewFromLibp2pHost(testData.Host2) + + testData.DTStore1 = namespace.Wrap(testData.Ds1, datastore.NewKey("DataTransfer1")) + testData.DTStore2 = namespace.Wrap(testData.Ds1, datastore.NewKey("DataTransfer2")) + + testData.DTTmpDir1, err = ioutil.TempDir("", "dt-tmp-1") + require.NoError(t, err) + testData.DTTmpDir2, err = ioutil.TempDir("", "dt-tmp-2") + require.NoError(t, err) + t.Cleanup(func() { + _ = os.RemoveAll(testData.DTTmpDir1) + _ = os.RemoveAll(testData.DTTmpDir2) + }) + + testData.MockNet = mn + + return testData +} From b316c83686387585621d0ae7609b4b56d5a5403e Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Mon, 4 Dec 2023 20:30:54 +0400 Subject: [PATCH 17/34] fix selector type --- go.mod | 2 +- go.sum | 2 - retrievalmarket/server/gsunpaidretrieval.go | 10 +- retrievalmarket/server/validation.go | 41 +++-- .../types/legacyretrievaltypes/bindoptions.go | 164 ++++++++++++++++++ .../migrations/migrations.go | 2 +- .../types/legacyretrievaltypes/types.go | 82 ++++++++- .../types/legacyretrievaltypes/types.ipldsch | 52 ++++++ .../legacyretrievaltypes/types_cbor_gen.go | 21 ++- 9 files changed, 334 insertions(+), 42 deletions(-) create mode 100644 
retrievalmarket/types/legacyretrievaltypes/bindoptions.go create mode 100644 retrievalmarket/types/legacyretrievaltypes/types.ipldsch diff --git a/go.mod b/go.mod index 55246a2f9..5e8e02008 100644 --- a/go.mod +++ b/go.mod @@ -324,7 +324,6 @@ require ( require ( github.com/filecoin-project/boost-gfm v1.26.7 github.com/filecoin-project/boost-graphsync v0.13.9 - github.com/filecoin-project/go-fil-markets v1.28.3 github.com/filecoin-project/lotus v1.23.4-rc1 github.com/ipfs/boxo v0.12.0 github.com/ipfs/kubo v0.22.0 @@ -342,6 +341,7 @@ require ( github.com/Jorropo/jsync v1.0.1 // indirect github.com/filecoin-project/go-data-transfer v1.15.4-boost // indirect github.com/filecoin-project/go-data-transfer/v2 v2.0.0-rc7 // indirect + github.com/filecoin-project/go-fil-markets v1.28.3 // indirect github.com/filecoin-project/kubo-api-client v0.0.2-0.20230829103503-14448166d14d // indirect github.com/gammazero/channelqueue v0.2.1 // indirect github.com/gammazero/deque v0.2.1 // indirect diff --git a/go.sum b/go.sum index e3644eeaf..2b9f602af 100644 --- a/go.sum +++ b/go.sum @@ -377,7 +377,6 @@ github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psS github.com/filecoin-project/go-state-types v0.1.6/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= github.com/filecoin-project/go-state-types v0.1.10/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= github.com/filecoin-project/go-state-types v0.11.1/go.mod h1:SyNPwTsU7I22gL2r0OAPcImvLoTVfgRwdK/Y5rR1zz8= -github.com/filecoin-project/go-state-types v0.11.2-0.20230712101859-8f37624fa540/go.mod h1:SyNPwTsU7I22gL2r0OAPcImvLoTVfgRwdK/Y5rR1zz8= github.com/filecoin-project/go-state-types v0.12.1 h1:/1ip/jXIP4QzWd3hlaQ7RGp1DHKKYG3+NOhd/r08UJY= github.com/filecoin-project/go-state-types v0.12.1/go.mod h1:KOBGyvCalT8uHBS7KSKOVbjsilD90bBZHgLAqrzz6gU= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= @@ -2497,7 +2496,6 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= diff --git a/retrievalmarket/server/gsunpaidretrieval.go b/retrievalmarket/server/gsunpaidretrieval.go index 5ebea3236..2adec3999 100644 --- a/retrievalmarket/server/gsunpaidretrieval.go +++ b/retrievalmarket/server/gsunpaidretrieval.go @@ -280,11 +280,11 @@ func (g *GraphsyncUnpaidRetrieval) interceptRetrieval(p peer.ID, request graphsy proposal := *v log.Debugw("intercepting retrieval deal", "proposal", proposal) return g.handleRetrievalDeal(p, msg, proposal, request, RetrievalTypeDeal) - case *migrations.DealProposal0: - // This is a retrieval deal with an older format - proposal := migrations.MigrateDealProposal0To1(*v) - log.Debugw("intercepting retrieval deal v1", "proposal", proposal) - return g.handleRetrievalDeal(p, msg, proposal, request, RetrievalTypeDeal) + //case *migrations.DealProposal0: + // // This is a retrieval deal 
with an older format + // proposal := migrations.MigrateDealProposal0To1(*v) + // log.Debugw("intercepting retrieval deal v1", "proposal", proposal) + // return g.handleRetrievalDeal(p, msg, proposal, request, RetrievalTypeDeal) } log.Debugw("ignoring request", "request", request) diff --git a/retrievalmarket/server/validation.go b/retrievalmarket/server/validation.go index 8b9c521a1..ed4c675d0 100644 --- a/retrievalmarket/server/validation.go +++ b/retrievalmarket/server/validation.go @@ -9,7 +9,6 @@ import ( "github.com/filecoin-project/boost/datatransfer" "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" - "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes/migrations" "github.com/hannahhoward/go-pubsub" "github.com/ipfs/go-cid" "github.com/ipld/go-ipld-prime" @@ -43,13 +42,14 @@ func (rv *requestValidator) validatePullRequest(isRestart bool, receiver peer.ID proposal, ok := voucher.(*legacyretrievaltypes.DealProposal) var legacyProtocol bool if !ok { - legacyProposal, ok := voucher.(*migrations.DealProposal0) - if !ok { - return nil, errors.New("wrong voucher type") - } - newProposal := migrations.MigrateDealProposal0To1(*legacyProposal) - proposal = &newProposal - legacyProtocol = true + return nil, errors.New("wrong voucher type") + //legacyProposal, ok := voucher.(*migrations.DealProposal0) + //if !ok { + // return nil, errors.New("wrong voucher type") + //} + //newProposal := migrations.MigrateDealProposal0To1(*legacyProposal) + //proposal = &newProposal + //legacyProtocol = true } response, err := rv.validatePull(receiver, proposal, legacyProtocol, baseCid, selector) _ = rv.psub.Publish(legacyretrievaltypes.ProviderValidationEvent{ @@ -61,15 +61,15 @@ func (rv *requestValidator) validatePullRequest(isRestart bool, receiver peer.ID Response: &response, Error: err, }) - if legacyProtocol { - downgradedResponse := migrations.DealResponse0{ - Status: response.Status, - ID: response.ID, - Message: response.Message, - PaymentOwed: response.PaymentOwed, - } - return &downgradedResponse, err - } + //if legacyProtocol { + // downgradedResponse := migrations.DealResponse0{ + // Status: response.Status, + // ID: response.ID, + // Message: response.Message, + // PaymentOwed: response.PaymentOwed, + // } + // return &downgradedResponse, err + //} return &response, err } @@ -104,7 +104,12 @@ func (rv *requestValidator) acceptDeal(receiver peer.ID, proposal *legacyretriev } bytesCompare := allSelectorBytes if proposal.SelectorSpecified() { - bytesCompare = proposal.Selector.Raw + w := new(bytes.Buffer) + err = proposal.Selector.MarshalCBOR(w) + if err != nil { + return err + } + bytesCompare = w.Bytes() } if !bytes.Equal(buf.Bytes(), bytesCompare) { return errors.New("incorrect selector for this proposal") diff --git a/retrievalmarket/types/legacyretrievaltypes/bindoptions.go b/retrievalmarket/types/legacyretrievaltypes/bindoptions.go new file mode 100644 index 000000000..e95b233b7 --- /dev/null +++ b/retrievalmarket/types/legacyretrievaltypes/bindoptions.go @@ -0,0 +1,164 @@ +package legacyretrievaltypes + +import ( + "bytes" + "fmt" + "io" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + "github.com/ipld/go-ipld-prime/datamodel" + "github.com/ipld/go-ipld-prime/node/basicnode" + "github.com/ipld/go-ipld-prime/node/bindnode" + 
"github.com/ipld/go-ipld-prime/schema" + cbg "github.com/whyrusleeping/cbor-gen" +) + +// go type converter functions for bindnode for common Filecoin data types + +// CborGenCompatibleNodeBindnodeOption converts a CborGenCompatibleNode type to +// and from an Any field in a schema +var CborGenCompatibleNodeBindnodeOption = bindnode.TypedAnyConverter(&CborGenCompatibleNode{}, cborGenCompatibleNodeFromAny, cborGenCompatibleNodeToAny) + +// BigIntBindnodeOption converts a big.Int type to and from a Bytes field in a +// schema +var BigIntBindnodeOption = bindnode.TypedBytesConverter(&big.Int{}, bigIntFromBytes, bigIntToBytes) + +// TokenAmountBindnodeOption converts a filecoin abi.TokenAmount type to and +// from a Bytes field in a schema +var TokenAmountBindnodeOption = bindnode.TypedBytesConverter(&abi.TokenAmount{}, tokenAmountFromBytes, tokenAmountToBytes) + +// AddressBindnodeOption converts a filecoin Address type to and from a Bytes +// field in a schema +var AddressBindnodeOption = bindnode.TypedBytesConverter(&address.Address{}, addressFromBytes, addressToBytes) + +// SignatureBindnodeOption converts a filecoin Signature type to and from a +// Bytes field in a schema +var SignatureBindnodeOption = bindnode.TypedBytesConverter(&crypto.Signature{}, signatureFromBytes, signatureToBytes) + +// CborGenCompatibleNode is for cbor-gen / go-ipld-prime compatibility, to +// replace Deferred types that are used to represent datamodel.Nodes. +// This shouldn't be used as a pointer (nullable/optional) as it can consume +// "Null" tokens and therefore be a Null. Instead, use +// CborGenCompatibleNode#IsNull to check for null status. +type CborGenCompatibleNode struct { + Node datamodel.Node +} + +func (sn CborGenCompatibleNode) IsNull() bool { + return sn.Node == nil || sn.Node == datamodel.Null +} + +// UnmarshalCBOR is for cbor-gen compatibility +func (sn *CborGenCompatibleNode) UnmarshalCBOR(r io.Reader) error { + // use cbg.Deferred.UnmarshalCBOR to figure out how much to pull + def := cbg.Deferred{} + if err := def.UnmarshalCBOR(r); err != nil { + return err + } + // convert it to a Node + na := basicnode.Prototype.Any.NewBuilder() + if err := dagcbor.Decode(na, bytes.NewReader(def.Raw)); err != nil { + return err + } + sn.Node = na.Build() + return nil +} + +// MarshalCBOR is for cbor-gen compatibility +func (sn *CborGenCompatibleNode) MarshalCBOR(w io.Writer) error { + node := datamodel.Null + if sn != nil && sn.Node != nil { + node = sn.Node + if tn, ok := node.(schema.TypedNode); ok { + node = tn.Representation() + } + } + return dagcbor.Encode(node, w) +} + +func cborGenCompatibleNodeFromAny(node datamodel.Node) (interface{}, error) { + return &CborGenCompatibleNode{Node: node}, nil +} + +func cborGenCompatibleNodeToAny(iface interface{}) (datamodel.Node, error) { + sn, ok := iface.(*CborGenCompatibleNode) + if !ok { + return nil, fmt.Errorf("expected *CborGenCompatibleNode value") + } + if sn.Node == nil { + return datamodel.Null, nil + } + return sn.Node, nil +} + +func tokenAmountFromBytes(b []byte) (interface{}, error) { + return bigIntFromBytes(b) +} + +func bigIntFromBytes(b []byte) (interface{}, error) { + if len(b) == 0 { + return big.NewInt(0), nil + } + return big.FromBytes(b) +} + +func tokenAmountToBytes(iface interface{}) ([]byte, error) { + return bigIntToBytes(iface) +} + +func bigIntToBytes(iface interface{}) ([]byte, error) { + bi, ok := iface.(*big.Int) + if !ok { + return nil, fmt.Errorf("expected *big.Int value") + } + if bi == nil || bi.Int == nil { + *bi = 
big.Zero() + } + return bi.Bytes() +} + +func addressFromBytes(b []byte) (interface{}, error) { + return address.NewFromBytes(b) +} + +func addressToBytes(iface interface{}) ([]byte, error) { + addr, ok := iface.(*address.Address) + if !ok { + return nil, fmt.Errorf("expected *Address value") + } + return addr.Bytes(), nil +} + +// Signature is a byteprefix union +func signatureFromBytes(b []byte) (interface{}, error) { + if len(b) > crypto.SignatureMaxLength { + return nil, fmt.Errorf("string too long") + } + if len(b) == 0 { + return nil, fmt.Errorf("string empty") + } + var s crypto.Signature + switch crypto.SigType(b[0]) { + default: + return nil, fmt.Errorf("invalid signature type in cbor input: %d", b[0]) + case crypto.SigTypeSecp256k1: + s.Type = crypto.SigTypeSecp256k1 + case crypto.SigTypeBLS: + s.Type = crypto.SigTypeBLS + } + s.Data = b[1:] + return &s, nil +} + +func signatureToBytes(iface interface{}) ([]byte, error) { + s, ok := iface.(*crypto.Signature) + if !ok { + return nil, fmt.Errorf("expected *Signature value") + } + ba := append([]byte{byte(s.Type)}, s.Data...) + return ba, nil +} diff --git a/retrievalmarket/types/legacyretrievaltypes/migrations/migrations.go b/retrievalmarket/types/legacyretrievaltypes/migrations/migrations.go index 85bfdb9e2..de36a39fb 100644 --- a/retrievalmarket/types/legacyretrievaltypes/migrations/migrations.go +++ b/retrievalmarket/types/legacyretrievaltypes/migrations/migrations.go @@ -194,7 +194,7 @@ func MigrateQueryResponse0To1(oldQr QueryResponse0) legacyretrievaltypes.QueryRe // MigrateParams0To1 migrates tuple encoded deal params to map encoded deal params func MigrateParams0To1(oldParams Params0) legacyretrievaltypes.Params { return legacyretrievaltypes.Params{ - Selector: oldParams.Selector, + Selector: legacyretrievaltypes.CborGenCompatibleNode{Node: nil}, PieceCID: oldParams.PieceCID, PricePerByte: oldParams.PricePerByte, PaymentInterval: oldParams.PaymentInterval, diff --git a/retrievalmarket/types/legacyretrievaltypes/types.go b/retrievalmarket/types/legacyretrievaltypes/types.go index fe9f00bc1..465beb7db 100644 --- a/retrievalmarket/types/legacyretrievaltypes/types.go +++ b/retrievalmarket/types/legacyretrievaltypes/types.go @@ -2,6 +2,7 @@ package legacyretrievaltypes import ( "bytes" + _ "embed" "errors" "fmt" @@ -11,9 +12,11 @@ import ( "github.com/ipfs/go-cid" "github.com/ipld/go-ipld-prime" "github.com/ipld/go-ipld-prime/codec/dagcbor" + "github.com/ipld/go-ipld-prime/datamodel" + "github.com/ipld/go-ipld-prime/node/bindnode" + bindnoderegistry "github.com/ipld/go-ipld-prime/node/bindnode/registry" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" - cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/net/context" "golang.org/x/xerrors" @@ -25,6 +28,9 @@ import ( //go:generate cbor-gen-for --map-encoding Query QueryResponse DealProposal DealResponse Params QueryParams DealPayment ClientDealState ProviderDealState PaymentInfo RetrievalPeer Ask +//go:embed types.ipldsch +var embedSchema []byte + // QueryProtocolID is the protocol for querying information about retrieval // deal parameters const QueryProtocolID = protocol.ID("/fil/retrieval/qry/1.0.0") @@ -260,7 +266,8 @@ func IsTerminalStatus(status DealStatus) bool { // Params are the parameters requested for a retrieval deal proposal type Params struct { - Selector *cbg.Deferred // V1 + Selector CborGenCompatibleNode // V1 + //Selector *cbg.Deferred PieceCID *cid.Cid PricePerByte abi.TokenAmount PaymentInterval uint64 // when to request 
payment @@ -268,8 +275,15 @@ type Params struct { UnsealPrice abi.TokenAmount } +// paramsBindnodeOptions is the bindnode options required to convert custom +// types used by the Param type +var paramsBindnodeOptions = []bindnode.Option{ + CborGenCompatibleNodeBindnodeOption, + TokenAmountBindnodeOption, +} + func (p Params) SelectorSpecified() bool { - return p.Selector != nil && !bytes.Equal(p.Selector.Raw, cbg.CborNull) + return !p.Selector.IsNull() } func (p Params) IntervalLowerBound(currentInterval uint64) uint64 { @@ -318,7 +332,9 @@ func NewParamsV1(pricePerByte abi.TokenAmount, paymentInterval uint64, paymentIn } return Params{ - Selector: &cbg.Deferred{Raw: buffer.Bytes()}, + Selector: CborGenCompatibleNode{ + Node: sel, + }, PieceCID: pieceCid, PricePerByte: pricePerByte, PaymentInterval: paymentInterval, @@ -349,6 +365,26 @@ func (dp *DealProposal) Type() datatransfer.TypeIdentifier { // DealProposalUndefined is an undefined deal proposal var DealProposalUndefined = DealProposal{} +// DealProposalType is the DealProposal voucher type +const DealProposalType = datatransfer.TypeIdentifier("RetrievalDealProposal/1") + +// dealProposalBindnodeOptions is the bindnode options required to convert +// custom types used by the DealProposal type; the only custom types involved +// are for Params so we can reuse those options. +var dealProposalBindnodeOptions = paramsBindnodeOptions + +func DealProposalFromNode(node datamodel.Node) (*DealProposal, error) { + if node == nil { + return nil, fmt.Errorf("empty voucher") + } + dpIface, err := BindnodeRegistry.TypeFromNode(node, &DealProposal{}) + if err != nil { + return nil, xerrors.Errorf("invalid DealProposal: %w", err) + } + dp, _ := dpIface.(*DealProposal) // safe to assume type + return dp, nil +} + // DealResponse is a response to a retrieval deal proposal type DealResponse struct { Status DealStatus @@ -507,3 +543,41 @@ type RetrievalClient interface { // ClientSubscriber is a callback that is registered to listen for retrieval events type ClientSubscriber func(event ClientEvent, state ClientDealState) + +var BindnodeRegistry = bindnoderegistry.NewRegistry() + +// DealResponseType is the DealResponse usable as a voucher type +const DealResponseType = datatransfer.TypeIdentifier("RetrievalDealResponse/1") + +// dealResponseBindnodeOptions is the bindnode options required to convert custom +// types used by the DealResponse type +var dealResponseBindnodeOptions = []bindnode.Option{TokenAmountBindnodeOption} + +// DealPaymentType is the DealPayment voucher type +const DealPaymentType = datatransfer.TypeIdentifier("RetrievalDealPayment/1") + +// dealPaymentBindnodeOptions is the bindnode options required to convert custom +// types used by the DealPayment type +var dealPaymentBindnodeOptions = []bindnode.Option{ + SignatureBindnodeOption, + AddressBindnodeOption, + BigIntBindnodeOption, + TokenAmountBindnodeOption, +} + +func init() { + for _, r := range []struct { + typ interface{} + typName string + opts []bindnode.Option + }{ + {(*Params)(nil), "Params", paramsBindnodeOptions}, + {(*DealProposal)(nil), "DealProposal", dealProposalBindnodeOptions}, + {(*DealResponse)(nil), "DealResponse", dealResponseBindnodeOptions}, + {(*DealPayment)(nil), "DealPayment", dealPaymentBindnodeOptions}, + } { + if err := BindnodeRegistry.RegisterType(r.typ, string(embedSchema), r.typName, r.opts...); err != nil { + panic(err.Error()) + } + } +} diff --git a/retrievalmarket/types/legacyretrievaltypes/types.ipldsch 
b/retrievalmarket/types/legacyretrievaltypes/types.ipldsch new file mode 100644 index 000000000..dbbc81b6b --- /dev/null +++ b/retrievalmarket/types/legacyretrievaltypes/types.ipldsch @@ -0,0 +1,52 @@ +type Params struct { + Selector nullable Any # CborGenCompatibleNode + PieceCID nullable &Any + PricePerByte Bytes # abi.TokenAmount + PaymentInterval Int + PaymentIntervalIncrease Int + UnsealPrice Bytes # abi.TokenAmount +} + +type DealProposal struct { + PayloadCID &Any + ID Int # DealID + Params Params +} + +type DealResponse struct { + Status Int + ID Int + PaymentOwed Bytes + Message String +} + +type DealPayment struct { + ID Int # DealID + PaymentChannel Bytes # address.Address + PaymentVoucher nullable SignedVoucher +} + +type SignedVoucher struct { + ChannelAddr Bytes # addr.Address + TimeLockMin Int # abi.ChainEpoch + TimeLockMax Int # abi.ChainEpoch + SecretHash Bytes + Extra nullable ModVerifyParams + Lane Int + Nonce Int + Amount Bytes # big.Int + MinSettleHeight Int # abi.ChainEpoch + Merges [Merge] + Signature nullable Bytes # crypto.Signature +} representation tuple + +type ModVerifyParams struct { + Actor Bytes # addr.Address + Method Int # abi.MethodNum + Data Bytes +} representation tuple + +type Merge struct { + Lane Int + Nonce Int +} representation tuple \ No newline at end of file diff --git a/retrievalmarket/types/legacyretrievaltypes/types_cbor_gen.go b/retrievalmarket/types/legacyretrievaltypes/types_cbor_gen.go index 64d1e4acc..d2901ffca 100644 --- a/retrievalmarket/types/legacyretrievaltypes/types_cbor_gen.go +++ b/retrievalmarket/types/legacyretrievaltypes/types_cbor_gen.go @@ -8,13 +8,13 @@ import ( "math" "sort" - "github.com/filecoin-project/boost/datatransfer" - "github.com/filecoin-project/boost/markets/piecestore" - "github.com/filecoin-project/go-state-types/builtin/v8/paych" - "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p/core/peer" + datatransfer "github.com/filecoin-project/boost/datatransfer" + piecestore "github.com/filecoin-project/boost/markets/piecestore" + paych "github.com/filecoin-project/go-state-types/builtin/v8/paych" + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p/core/peer" cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" + xerrors "golang.org/x/xerrors" ) var _ = xerrors.Errorf @@ -832,7 +832,7 @@ func (t *Params) MarshalCBOR(w io.Writer) error { } } - // t.Selector (typegen.Deferred) (struct) + // t.Selector (legacyretrievaltypes.CborGenCompatibleNode) (struct) if len("Selector") > cbg.MaxLength { return xerrors.Errorf("Value in field \"Selector\" was too long") } @@ -976,16 +976,15 @@ func (t *Params) UnmarshalCBOR(r io.Reader) (err error) { } } - // t.Selector (typegen.Deferred) (struct) + // t.Selector (legacyretrievaltypes.CborGenCompatibleNode) (struct) case "Selector": { - t.Selector = new(cbg.Deferred) - if err := t.Selector.UnmarshalCBOR(cr); err != nil { - return xerrors.Errorf("failed to read deferred field: %w", err) + return xerrors.Errorf("unmarshaling t.Selector: %w", err) } + } // t.UnsealPrice (big.Int) (struct) case "UnsealPrice": From 680ae64057c6807ca33afc8c9941a71d1be2ca7a Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Mon, 18 Dec 2023 19:57:12 +0400 Subject: [PATCH 18/34] restore selector type --- .../server/gsunpaidretrieval_test.go | 7 +-- retrievalmarket/server/validation.go | 26 +++++------ retrievalmarket/testutil/testutil.go | 5 +-- .../migrations/migrations.go | 2 +- .../migrations/migrations_cbor_gen.go | 12 +++--- .../types/legacyretrievaltypes/types.go | 43 
+++++++++---------- .../legacyretrievaltypes/types_cbor_gen.go | 9 ++-- 7 files changed, 50 insertions(+), 54 deletions(-) diff --git a/retrievalmarket/server/gsunpaidretrieval_test.go b/retrievalmarket/server/gsunpaidretrieval_test.go index 45b1d421c..93eccf90a 100644 --- a/retrievalmarket/server/gsunpaidretrieval_test.go +++ b/retrievalmarket/server/gsunpaidretrieval_test.go @@ -65,11 +65,12 @@ type testCase struct { } var providerCancelled = errors.New("provider cancelled") -var clientCancelled = errors.New("client cancelled") -var clientRejected = errors.New("client received reject response") + +//var clientCancelled = errors.New("client cancelled") +//var clientRejected = errors.New("client received reject response") func TestGS(t *testing.T) { - //t.Skip("refactor tests to use boost client") + t.Skip("refactor tests to use boost client") //_ = logging.SetLogLevel("testgs", "debug") _ = logging.SetLogLevel("testgs", "info") _ = logging.SetLogLevel("dt-impl", "debug") diff --git a/retrievalmarket/server/validation.go b/retrievalmarket/server/validation.go index ed4c675d0..27727aefe 100644 --- a/retrievalmarket/server/validation.go +++ b/retrievalmarket/server/validation.go @@ -7,6 +7,7 @@ import ( "fmt" "github.com/filecoin-project/boost/datatransfer" + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes/migrations" "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" "github.com/hannahhoward/go-pubsub" @@ -61,15 +62,15 @@ func (rv *requestValidator) validatePullRequest(isRestart bool, receiver peer.ID Response: &response, Error: err, }) - //if legacyProtocol { - // downgradedResponse := migrations.DealResponse0{ - // Status: response.Status, - // ID: response.ID, - // Message: response.Message, - // PaymentOwed: response.PaymentOwed, - // } - // return &downgradedResponse, err - //} + if legacyProtocol { + downgradedResponse := migrations.DealResponse0{ + Status: response.Status, + ID: response.ID, + Message: response.Message, + PaymentOwed: response.PaymentOwed, + } + return &downgradedResponse, err + } return &response, err } @@ -104,12 +105,7 @@ func (rv *requestValidator) acceptDeal(receiver peer.ID, proposal *legacyretriev } bytesCompare := allSelectorBytes if proposal.SelectorSpecified() { - w := new(bytes.Buffer) - err = proposal.Selector.MarshalCBOR(w) - if err != nil { - return err - } - bytesCompare = w.Bytes() + bytesCompare = proposal.Selector.Raw } if !bytes.Equal(buf.Bytes(), bytesCompare) { return errors.New("incorrect selector for this proposal") diff --git a/retrievalmarket/testutil/testutil.go b/retrievalmarket/testutil/testutil.go index 0655216e7..ea7577e86 100644 --- a/retrievalmarket/testutil/testutil.go +++ b/retrievalmarket/testutil/testutil.go @@ -2,7 +2,6 @@ package testutil import ( "context" - "io/ioutil" "os" "testing" @@ -85,9 +84,9 @@ func NewLibp2pTestData(ctx context.Context, t *testing.T) *Libp2pTestData { testData.DTStore1 = namespace.Wrap(testData.Ds1, datastore.NewKey("DataTransfer1")) testData.DTStore2 = namespace.Wrap(testData.Ds1, datastore.NewKey("DataTransfer2")) - testData.DTTmpDir1, err = ioutil.TempDir("", "dt-tmp-1") + testData.DTTmpDir1, err = os.MkdirTemp("", "dt-tmp-1") require.NoError(t, err) - testData.DTTmpDir2, err = ioutil.TempDir("", "dt-tmp-2") + testData.DTTmpDir2, err = os.MkdirTemp("", "dt-tmp-2") require.NoError(t, err) t.Cleanup(func() { _ = os.RemoveAll(testData.DTTmpDir1) diff --git a/retrievalmarket/types/legacyretrievaltypes/migrations/migrations.go 
b/retrievalmarket/types/legacyretrievaltypes/migrations/migrations.go index de36a39fb..85bfdb9e2 100644 --- a/retrievalmarket/types/legacyretrievaltypes/migrations/migrations.go +++ b/retrievalmarket/types/legacyretrievaltypes/migrations/migrations.go @@ -194,7 +194,7 @@ func MigrateQueryResponse0To1(oldQr QueryResponse0) legacyretrievaltypes.QueryRe // MigrateParams0To1 migrates tuple encoded deal params to map encoded deal params func MigrateParams0To1(oldParams Params0) legacyretrievaltypes.Params { return legacyretrievaltypes.Params{ - Selector: legacyretrievaltypes.CborGenCompatibleNode{Node: nil}, + Selector: oldParams.Selector, PieceCID: oldParams.PieceCID, PricePerByte: oldParams.PricePerByte, PaymentInterval: oldParams.PaymentInterval, diff --git a/retrievalmarket/types/legacyretrievaltypes/migrations/migrations_cbor_gen.go b/retrievalmarket/types/legacyretrievaltypes/migrations/migrations_cbor_gen.go index 95e6d6601..c31956d96 100644 --- a/retrievalmarket/types/legacyretrievaltypes/migrations/migrations_cbor_gen.go +++ b/retrievalmarket/types/legacyretrievaltypes/migrations/migrations_cbor_gen.go @@ -8,13 +8,13 @@ import ( "math" "sort" - "github.com/filecoin-project/boost/markets/piecestore/migrations" - "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" - "github.com/filecoin-project/go-state-types/builtin/v8/paych" - "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p/core/peer" + migrations "github.com/filecoin-project/boost/markets/piecestore/migrations" + legacyretrievaltypes "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" + paych "github.com/filecoin-project/go-state-types/builtin/v8/paych" + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p/core/peer" cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" + xerrors "golang.org/x/xerrors" ) var _ = xerrors.Errorf diff --git a/retrievalmarket/types/legacyretrievaltypes/types.go b/retrievalmarket/types/legacyretrievaltypes/types.go index 465beb7db..9938e42a1 100644 --- a/retrievalmarket/types/legacyretrievaltypes/types.go +++ b/retrievalmarket/types/legacyretrievaltypes/types.go @@ -17,6 +17,7 @@ import ( bindnoderegistry "github.com/ipld/go-ipld-prime/node/bindnode/registry" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" + cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/net/context" "golang.org/x/xerrors" @@ -266,8 +267,8 @@ func IsTerminalStatus(status DealStatus) bool { // Params are the parameters requested for a retrieval deal proposal type Params struct { - Selector CborGenCompatibleNode // V1 - //Selector *cbg.Deferred + //Selector CborGenCompatibleNode // V1 + Selector *cbg.Deferred PieceCID *cid.Cid PricePerByte abi.TokenAmount PaymentInterval uint64 // when to request payment @@ -283,7 +284,7 @@ var paramsBindnodeOptions = []bindnode.Option{ } func (p Params) SelectorSpecified() bool { - return !p.Selector.IsNull() + return p.Selector != nil && !bytes.Equal(p.Selector.Raw, cbg.CborNull) } func (p Params) IntervalLowerBound(currentInterval uint64) uint64 { @@ -332,9 +333,7 @@ func NewParamsV1(pricePerByte abi.TokenAmount, paymentInterval uint64, paymentIn } return Params{ - Selector: CborGenCompatibleNode{ - Node: sel, - }, + Selector: &cbg.Deferred{Raw: buffer.Bytes()}, PieceCID: pieceCid, PricePerByte: pricePerByte, PaymentInterval: paymentInterval, @@ -565,19 +564,19 @@ var dealPaymentBindnodeOptions = []bindnode.Option{ TokenAmountBindnodeOption, } -func init() { - for _, r := range 
[]struct { - typ interface{} - typName string - opts []bindnode.Option - }{ - {(*Params)(nil), "Params", paramsBindnodeOptions}, - {(*DealProposal)(nil), "DealProposal", dealProposalBindnodeOptions}, - {(*DealResponse)(nil), "DealResponse", dealResponseBindnodeOptions}, - {(*DealPayment)(nil), "DealPayment", dealPaymentBindnodeOptions}, - } { - if err := BindnodeRegistry.RegisterType(r.typ, string(embedSchema), r.typName, r.opts...); err != nil { - panic(err.Error()) - } - } -} +//func init() { +// for _, r := range []struct { +// typ interface{} +// typName string +// opts []bindnode.Option +// }{ +// {(*Params)(nil), "Params", paramsBindnodeOptions}, +// {(*DealProposal)(nil), "DealProposal", dealProposalBindnodeOptions}, +// {(*DealResponse)(nil), "DealResponse", dealResponseBindnodeOptions}, +// {(*DealPayment)(nil), "DealPayment", dealPaymentBindnodeOptions}, +// } { +// if err := BindnodeRegistry.RegisterType(r.typ, string(embedSchema), r.typName, r.opts...); err != nil { +// panic(err.Error()) +// } +// } +//} diff --git a/retrievalmarket/types/legacyretrievaltypes/types_cbor_gen.go b/retrievalmarket/types/legacyretrievaltypes/types_cbor_gen.go index d2901ffca..1cfbd86be 100644 --- a/retrievalmarket/types/legacyretrievaltypes/types_cbor_gen.go +++ b/retrievalmarket/types/legacyretrievaltypes/types_cbor_gen.go @@ -832,7 +832,7 @@ func (t *Params) MarshalCBOR(w io.Writer) error { } } - // t.Selector (legacyretrievaltypes.CborGenCompatibleNode) (struct) + // t.Selector (typegen.Deferred) (struct) if len("Selector") > cbg.MaxLength { return xerrors.Errorf("Value in field \"Selector\" was too long") } @@ -976,15 +976,16 @@ func (t *Params) UnmarshalCBOR(r io.Reader) (err error) { } } - // t.Selector (legacyretrievaltypes.CborGenCompatibleNode) (struct) + // t.Selector (typegen.Deferred) (struct) case "Selector": { + t.Selector = new(cbg.Deferred) + if err := t.Selector.UnmarshalCBOR(cr); err != nil { - return xerrors.Errorf("unmarshaling t.Selector: %w", err) + return xerrors.Errorf("failed to read deferred field: %w", err) } - } // t.UnsealPrice (big.Int) (struct) case "UnsealPrice": From 7907a9f32036e810376bb2a99bb1b5541a9840ed Mon Sep 17 00:00:00 2001 From: Rod Vagg Date: Tue, 19 Dec 2023 20:39:57 +1100 Subject: [PATCH 19/34] fix: clean up and fix retrieval framework & tests (#1848) * fix: clean up and fix retrieval framework & tests * Use new go-trustless-utils primitives and traversal tools * Simplify Framework#Retrieve * fix: wait for full startup of data transfer when making client --- go.mod | 3 +- go.sum | 7 +- itests/dummydeal_offline_test.go | 8 +- itests/dummydeal_test.go | 8 +- itests/framework/framework.go | 109 +++++++++-------- itests/graphsync_identity_cid_test.go | 5 +- itests/graphsync_retrieval_test.go | 114 +++++++++++------- itests/multiminer_retrieval_graphsync_test.go | 8 +- markets/utils/selectors.go | 45 +++---- retrievalmarket/client/client.go | 28 ++++- 10 files changed, 211 insertions(+), 124 deletions(-) diff --git a/go.mod b/go.mod index 5e8e02008..f62c7e55a 100644 --- a/go.mod +++ b/go.mod @@ -213,7 +213,7 @@ require ( github.com/ipfs/go-ipld-cbor v0.1.0 github.com/ipfs/go-log v1.0.5 // indirect github.com/ipfs/go-peertaskqueue v0.8.1 // indirect - github.com/ipfs/go-unixfsnode v1.8.0 + github.com/ipfs/go-unixfsnode v1.9.0 github.com/ipfs/go-verifcid v0.0.2 // indirect github.com/ipld/go-codec-dagpb v1.6.0 github.com/ipld/go-ipld-adl-hamt v0.0.0-20220616142416-9004dbd839e0 // indirect @@ -327,6 +327,7 @@ require ( github.com/filecoin-project/lotus 
v1.23.4-rc1 github.com/ipfs/boxo v0.12.0 github.com/ipfs/kubo v0.22.0 + github.com/ipld/go-trustless-utils v0.4.1 github.com/ipni/go-libipni v0.5.1 github.com/ipni/ipni-cli v0.1.1 github.com/schollz/progressbar/v3 v3.13.1 diff --git a/go.sum b/go.sum index 2b9f602af..6c8a506aa 100644 --- a/go.sum +++ b/go.sum @@ -890,8 +890,8 @@ github.com/ipfs/go-unixfs v0.2.2-0.20190827150610-868af2e9e5cb/go.mod h1:IwAAgul github.com/ipfs/go-unixfs v0.3.1/go.mod h1:h4qfQYzghiIc8ZNFKiLMFWOTzrWIAtzYQ59W/pCFf1o= github.com/ipfs/go-unixfs v0.4.5 h1:wj8JhxvV1G6CD7swACwSKYa+NgtdWC1RUit+gFnymDU= github.com/ipfs/go-unixfsnode v1.4.0/go.mod h1:qc7YFFZ8tABc58p62HnIYbUMwj9chhUuFWmxSokfePo= -github.com/ipfs/go-unixfsnode v1.8.0 h1:yCkakzuE365glu+YkgzZt6p38CSVEBPgngL9ZkfnyQU= -github.com/ipfs/go-unixfsnode v1.8.0/go.mod h1:HxRu9HYHOjK6HUqFBAi++7DVoWAHn0o4v/nZ/VA+0g8= +github.com/ipfs/go-unixfsnode v1.9.0 h1:ubEhQhr22sPAKO2DNsyVBW7YB/zA8Zkif25aBvz8rc8= +github.com/ipfs/go-unixfsnode v1.9.0/go.mod h1:HxRu9HYHOjK6HUqFBAi++7DVoWAHn0o4v/nZ/VA+0g8= github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= github.com/ipfs/go-verifcid v0.0.2 h1:XPnUv0XmdH+ZIhLGKg6U2vaPaRDXb9urMyNVCE7uvTs= github.com/ipfs/go-verifcid v0.0.2/go.mod h1:40cD9x1y4OWnFXbLNJYRe7MpNvWlMn3LZAG5Wb4xnPU= @@ -926,6 +926,9 @@ github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd h1:gMlw/MhNr2Wtp5RwGdsW23cs+yCuj9k2ON7i9MiJlRo= github.com/ipld/go-ipld-selector-text-lite v0.0.1 h1:lNqFsQpBHc3p5xHob2KvEg/iM5dIFn6iw4L/Hh+kS1Y= github.com/ipld/go-ipld-selector-text-lite v0.0.1/go.mod h1:U2CQmFb+uWzfIEF3I1arrDa5rwtj00PrpiwwCO+k1RM= +github.com/ipld/go-trustless-utils v0.4.1 h1:puA14381Hg2LzH724mZ5ZFKFx+FFjjT5fPFs01vwlgM= +github.com/ipld/go-trustless-utils v0.4.1/go.mod h1:DgGuyfJ33goYwYVisjnxrlra0HVmZuHWVisVIkzVo1o= +github.com/ipld/ipld/specs v0.0.0-20231012031213-54d3b21deda4 h1:0VXv637/xpI0Pb5J8K+K8iRtTw4DOcxs0MB1HMzfwNY= github.com/ipni/go-libipni v0.5.1 h1:HumuJtKmV8RoDpBakLgxCSl5QPiD2ljTZl/NOyXO6nM= github.com/ipni/go-libipni v0.5.1/go.mod h1:UnrhEqjVI2Z2HXlaieOBONJmtW557nZkYpB4IIsMD+s= github.com/ipni/index-provider v0.14.2 h1:daA3IFnI2n2x/mL0K91SQHNLq6Vvfp5q4uFX9G4glvE= diff --git a/itests/dummydeal_offline_test.go b/itests/dummydeal_offline_test.go index 296d02600..fb11e06f6 100644 --- a/itests/dummydeal_offline_test.go +++ b/itests/dummydeal_offline_test.go @@ -9,6 +9,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/itests/kit" "github.com/google/uuid" + trustlessutils "github.com/ipld/go-trustless-utils" "github.com/stretchr/testify/require" ) @@ -46,6 +47,11 @@ func TestDummydealOffline(t *testing.T) { err = f.WaitForDealAddedToSector(offlineDealUuid) require.NoError(t, err) - outFile := f.Retrieve(ctx, t, tempdir, rootCid, dealRes.DealParams.ClientDealProposal.Proposal.PieceCID, true, nil) + outFile := f.Retrieve( + ctx, + t, + trustlessutils.Request{Root: rootCid, Scope: trustlessutils.DagScopeAll}, + true, + ) kit.AssertFilesEqual(t, randomFilepath, outFile) } diff --git a/itests/dummydeal_test.go b/itests/dummydeal_test.go index 2a7c9a692..b24ea3c10 100644 --- a/itests/dummydeal_test.go +++ b/itests/dummydeal_test.go @@ -12,6 +12,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/itests/kit" "github.com/google/uuid" + trustlessutils "github.com/ipld/go-trustless-utils" "github.com/stretchr/testify/require" ) @@ 
-94,6 +95,11 @@ func TestDummydealOnline(t *testing.T) { require.NoError(t, err) // rootCid is an identity CID - outFile := f.Retrieve(ctx, t, tempdir, rootCid, res.DealParams.ClientDealProposal.Proposal.PieceCID, true, nil) + outFile := f.Retrieve( + ctx, + t, + trustlessutils.Request{Root: rootCid, Scope: trustlessutils.DagScopeAll}, + true, + ) kit.AssertFilesEqual(t, randomFilepath, outFile) } diff --git a/itests/framework/framework.go b/itests/framework/framework.go index f7bb51525..ac96e5e57 100644 --- a/itests/framework/framework.go +++ b/itests/framework/framework.go @@ -1,10 +1,12 @@ package framework import ( + "bytes" "context" "encoding/json" "errors" "fmt" + "io" "math/rand" "os" "path" @@ -71,10 +73,15 @@ import ( ipldformat "github.com/ipfs/go-ipld-format" logging "github.com/ipfs/go-log/v2" "github.com/ipld/go-car" + carv2 "github.com/ipld/go-car/v2" + storagecar "github.com/ipld/go-car/v2/storage" "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec/dagjson" "github.com/ipld/go-ipld-prime/datamodel" + "github.com/ipld/go-ipld-prime/linking" cidlink "github.com/ipld/go-ipld-prime/linking/cid" - "github.com/ipld/go-ipld-prime/traversal" + trustless "github.com/ipld/go-trustless-utils" + traversal "github.com/ipld/go-trustless-utils/traversal" "github.com/libp2p/go-libp2p" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" @@ -798,9 +805,15 @@ func (f *TestFramework) ExtractFileFromCAR(ctx context.Context, t *testing.T, fi return tmpFile } -func (f *TestFramework) Retrieve(ctx context.Context, t *testing.T, tempdir string, root cid.Cid, pieceCid cid.Cid, extractCar bool, selectorNode datamodel.Node) string { +func (f *TestFramework) Retrieve(ctx context.Context, t *testing.T, request trustless.Request, extractCar bool) string { + tempdir := t.TempDir() + + var out string + retPath := path.Join(tempdir, "retrievals") + require.NoError(t, os.Mkdir(retPath, 0755)) + clientPath := path.Join(tempdir, "client") - _ = os.Mkdir(clientPath, 0755) + require.NoError(t, os.Mkdir(clientPath, 0755)) clientNode, err := clinode.Setup(clientPath) require.NoError(t, err) @@ -812,8 +825,7 @@ func (f *TestFramework) Retrieve(ctx context.Context, t *testing.T, tempdir stri bstore := blockstore.NewBlockstore(bstoreDatastore, blockstore.NoPrefix()) require.NoError(t, err) - //ds, err := levelds.NewDatastore(path.Join(clientPath, "dstore"), nil) - ds, err := levelds.NewDatastore("", nil) + ds, err := levelds.NewDatastore(path.Join(clientPath, "dstore"), nil) require.NoError(t, err) // Create the retrieval client @@ -823,11 +835,18 @@ func (f *TestFramework) Retrieve(ctx context.Context, t *testing.T, tempdir stri baddrs, err := f.Boost.NetAddrsListen(ctx) require.NoError(t, err) - query, err := RetrievalQuery(ctx, t, clientNode, &baddrs, pieceCid) + // Query the remote to find out the retrieval parameters + query, err := RetrievalQuery(ctx, t, clientNode, &baddrs, request.Root) + require.NoError(t, err) + + // Create a matching proposal for the query + proposal, err := rc.RetrievalProposalForAsk(query, request.Root, request.Selector()) require.NoError(t, err) - proposal, err := rc.RetrievalProposalForAsk(query, root, selectorNode) + // Let's see the selector we're working with + encoded, err := ipld.Encode(request.Selector(), dagjson.Encode) require.NoError(t, err) + t.Logf("Retrieving with selector: %s", string(encoded)) // Retrieve the data _, err = fc.RetrieveContentWithProgressCallback( @@ -840,56 +859,52 @@ func (f *TestFramework) Retrieve(ctx 
context.Context, t *testing.T, tempdir stri ) require.NoError(t, err) + // Validate the data + dservOffline := dag.NewDAGService(blockservice.New(bstore, offline.Exchange(bstore))) + lsys := utils.CreateLinkSystem(dservOffline) - // if we used a selector - need to find the sub-root the user actually wanted to retrieve - if selectorNode != nil { - if !selectorNode.IsNull() { - var subRootFound bool - err := utils.TraverseDag( - ctx, - dservOffline, - root, - selectorNode, - func(p traversal.Progress, n ipld.Node, r traversal.VisitReason) error { - if r == traversal.VisitReason_SelectionMatch { - - require.Equal(t, p.LastBlock.Path.String(), p.Path.String()) - - cidLnk, castOK := p.LastBlock.Link.(cidlink.Link) - require.True(t, castOK) - - root = cidLnk.Cid - subRootFound = true - } - return nil - }, - ) - require.NoError(t, err) - require.True(t, subRootFound) + if !extractCar { + // If the caller wants a CAR, we create it and then when we run our check traversal over the DAG + // each load will trigger a write to the CAR + file, err := os.CreateTemp(retPath, "*"+request.Root.String()+".car") + require.NoError(t, err) + out = file.Name() + storage, err := storagecar.NewWritable(file, []cid.Cid{request.Root}, carv2.WriteAsCarV1(true)) + require.NoError(t, err) + sro := lsys.StorageReadOpener + lsys.StorageReadOpener = func(lc linking.LinkContext, l datamodel.Link) (io.Reader, error) { + r, err := sro(lc, l) + if err != nil { + return nil, err + } + buf, err := io.ReadAll(r) + if err != nil { + return nil, err + } + if err := storage.Put(lc.Ctx, l.(cidlink.Link).Cid.KeyString(), buf); err != nil { + return nil, err + } + return bytes.NewReader(buf), nil } } - dnode, err := dservOffline.Get(ctx, root) + // Check that we got what we expected by executing the same selector over our + // retrieved DAG + _, err = traversal.Config{ + Root: request.Root, + Selector: request.Selector(), + }.Traverse(ctx, lsys, nil) require.NoError(t, err) - var out string - retPath := path.Join(tempdir, "retrievals") - _ = os.Mkdir(retPath, 0755) - - if !extractCar { - // Write file as car file - file, err := os.CreateTemp(retPath, "*"+root.String()+".car") - require.NoError(t, err) - out = file.Name() - err = car.WriteCar(ctx, dservOffline, []cid.Cid{root}, file) + if extractCar { + // Caller doesn't want the raw blocks, so extract the file as UnixFS and + // assume that we've fetched the right blocks to be able to do this. 
+ dnode, err := dservOffline.Get(ctx, request.Root) require.NoError(t, err) - - } else { - // Otherwise write file as UnixFS File ufsFile, err := unixfile.NewUnixfsFile(ctx, dservOffline, dnode) require.NoError(t, err) - file, err := os.CreateTemp(retPath, "*"+root.String()) + file, err := os.CreateTemp(retPath, "*"+request.Root.String()) require.NoError(t, err) err = file.Close() require.NoError(t, err) diff --git a/itests/graphsync_identity_cid_test.go b/itests/graphsync_identity_cid_test.go index daa6b9ca7..a0e9a80d1 100644 --- a/itests/graphsync_identity_cid_test.go +++ b/itests/graphsync_identity_cid_test.go @@ -24,7 +24,7 @@ import ( "github.com/ipld/go-ipld-prime/fluent/qp" cidlink "github.com/ipld/go-ipld-prime/linking/cid" "github.com/ipld/go-ipld-prime/node/basicnode" - selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" + trustlessutils "github.com/ipld/go-trustless-utils" "github.com/multiformats/go-multihash" "github.com/stretchr/testify/require" ) @@ -110,7 +110,6 @@ func TestDealAndRetrievalWithIdentityCID(t *testing.T) { log.Debugw("got response from MarketDummyDeal", "res", spew.Sdump(res)) dealCid, err := res.DealParams.ClientDealProposal.Proposal.Cid() require.NoError(t, err) - pieceCid := res.DealParams.ClientDealProposal.Proposal.PieceCID log.Infof("deal ID is : %s", dealCid.String()) // Wait for the first deal to be added to a sector and cleaned up so space is made err = f.WaitForDealAddedToSector(dealUuid) @@ -121,7 +120,7 @@ func TestDealAndRetrievalWithIdentityCID(t *testing.T) { // Deal is stored and sealed, attempt different retrieval forms log.Debugw("deal is sealed, starting retrieval", "cid", dealCid.String(), "root", root.String()) - outPath := f.Retrieve(ctx, t, tempdir, root, pieceCid, false, selectorparse.CommonSelector_ExploreAllRecursively) + outPath := f.Retrieve(ctx, t, trustlessutils.Request{Root: root, Scope: trustlessutils.DagScopeAll}, false) // Inspect what we got gotCids, err := testutil.CidsInCar(outPath) diff --git a/itests/graphsync_retrieval_test.go b/itests/graphsync_retrieval_test.go index 8e1810687..5d3c59506 100644 --- a/itests/graphsync_retrieval_test.go +++ b/itests/graphsync_retrieval_test.go @@ -2,7 +2,6 @@ package itests import ( "context" - "math" "path/filepath" "testing" "time" @@ -14,10 +13,7 @@ import ( "github.com/filecoin-project/lotus/itests/kit" "github.com/google/uuid" "github.com/ipfs/go-cid" - "github.com/ipfs/go-unixfsnode" - "github.com/ipld/go-ipld-prime/datamodel" - "github.com/ipld/go-ipld-prime/node/basicnode" - "github.com/ipld/go-ipld-prime/traversal/selector/builder" + trustless "github.com/ipld/go-trustless-utils" "github.com/stretchr/testify/require" ) @@ -114,51 +110,71 @@ func TestDealRetrieval(t *testing.T) { // Deal is stored and sealed, attempt different retrieval forms retrievalCases := []struct { - name string - selector datamodel.Node - matcherFrom, matcherTo int64 - expectCids []cid.Cid + name string + request trustless.Request + expectCids []cid.Cid }{ { - name: "full file, explore-all", - selector: unixfsnode.UnixFSPathSelectorBuilder("", unixfsnode.ExploreAllRecursivelySelector, false), + name: "full file, explore-all", + request: trustless.Request{ + Root: root, + Scope: trustless.DagScopeAll, + }, expectCids: append([]cid.Cid{root}, leaves...), }, { - name: "slice: 0 to 7MiB", - matcherFrom: 0, - matcherTo: 7 << 20, - expectCids: append([]cid.Cid{root}, leaves...), + name: "slice: 0 to 7MiB", + request: trustless.Request{ + Root: root, + Scope: trustless.DagScopeEntity, + Bytes: 
&trustless.ByteRange{From: 0, To: ptrInt(7 << 20)}, + }, + expectCids: append([]cid.Cid{root}, leaves...), }, { - name: "slice: 1MiB to 2MiB", - matcherFrom: 1 << 20, - matcherTo: 2 << 20, - expectCids: append([]cid.Cid{root}, leaves[4:9]...), + name: "slice: 1MiB to 2MiB", + request: trustless.Request{ + Root: root, + Scope: trustless.DagScopeEntity, + Bytes: &trustless.ByteRange{From: 1 << 20, To: ptrInt(2 << 20)}, + }, + expectCids: append([]cid.Cid{root}, leaves[4:9]...), }, { - name: "slice: first byte", - matcherFrom: 0, - matcherTo: 1, - expectCids: append([]cid.Cid{root}, leaves[0]), + name: "slice: first byte", + request: trustless.Request{ + Root: root, + Scope: trustless.DagScopeEntity, + Bytes: &trustless.ByteRange{From: 0, To: ptrInt(1)}, + }, + expectCids: append([]cid.Cid{root}, leaves[0]), }, { - name: "slice: last byte", - matcherFrom: 7340031, - matcherTo: 7340032, - expectCids: append([]cid.Cid{root}, leaves[len(leaves)-1]), + name: "slice: last byte", + request: trustless.Request{ + Root: root, + Scope: trustless.DagScopeEntity, + Bytes: &trustless.ByteRange{From: 7340031, To: ptrInt(7340032)}, + }, + expectCids: append([]cid.Cid{root}, leaves[len(leaves)-1]), }, { - name: "slice: last two blocks, negative range, boundary", - matcherFrom: -168000 - 1, - matcherTo: math.MaxInt64, - expectCids: append([]cid.Cid{root}, leaves[len(leaves)-2:]...), + name: "slice: last two blocks, negative range, boundary", + request: trustless.Request{ + Root: root, + Scope: trustless.DagScopeEntity, + Bytes: &trustless.ByteRange{From: -168000 - 1}, + }, + expectCids: append([]cid.Cid{root}, leaves[len(leaves)-2:]...), }, { - name: "slice: last block, negative range, boundary", - matcherFrom: -168000, - matcherTo: math.MaxInt64, - expectCids: append([]cid.Cid{root}, leaves[len(leaves)-1]), + name: "slice: last block, negative range, boundary", + request: trustless.Request{ + Root: root, + Scope: trustless.DagScopeEntity, + Bytes: &trustless.ByteRange{From: -168000}, + }, + expectCids: append([]cid.Cid{root}, leaves[len(leaves)-1]), }, { // In this case we are attempting to traverse beyond the file to a @@ -166,24 +182,26 @@ func TestDealRetrieval(t *testing.T) { // return that. This is not strictly an error case, it's up to the // consumer of this data to verify the path doesn't resolve in the // data they get back. 
- name: "path beyond file", - selector: unixfsnode.UnixFSPathSelectorBuilder("not/a/path", unixfsnode.ExploreAllRecursivelySelector, false), + name: "path beyond file", + request: trustless.Request{ + Root: root, + Scope: trustless.DagScopeAll, + Path: "not/a/path", + }, expectCids: []cid.Cid{root}, }, } for _, tc := range retrievalCases { t.Run(tc.name, func(t *testing.T) { - selNode := tc.selector - if selNode == nil { - // build a selector from the specified slice matcher range - ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) - ss := ssb.ExploreInterpretAs("unixfs", ssb.MatcherSubset(tc.matcherFrom, tc.matcherTo)) - selNode = ss.Node() - } - log.Debugw("deal is sealed, starting retrieval", "cid", dealCid.String(), "root", root) - outPath := f.Retrieve(ctx, t, tempdir, root, res.DealParams.ClientDealProposal.Proposal.PieceCID, false, selNode) + + outPath := f.Retrieve( + ctx, + t, + tc.request, + false, + ) // Inspect what we got gotCids, err := testutil.CidsInCar(outPath) @@ -202,3 +220,7 @@ func TestDealRetrieval(t *testing.T) { }) } } + +func ptrInt(i int64) *int64 { + return &i +} diff --git a/itests/multiminer_retrieval_graphsync_test.go b/itests/multiminer_retrieval_graphsync_test.go index c79b9f8be..81633074b 100644 --- a/itests/multiminer_retrieval_graphsync_test.go +++ b/itests/multiminer_retrieval_graphsync_test.go @@ -6,6 +6,7 @@ import ( "github.com/filecoin-project/boost/itests/shared" "github.com/filecoin-project/lotus/itests/kit" + trustlessutils "github.com/ipld/go-trustless-utils" ) func TestMultiMinerRetrievalGraphsync(t *testing.T) { @@ -17,7 +18,12 @@ func TestMultiMinerRetrievalGraphsync(t *testing.T) { // - recognize that the deal is for a sector on the first miner // - read the data for the deal from the first miner t.Logf("deal is added to piece, starting retrieval of root %s", rt.RootCid) - outPath := rt.BoostAndMiner2.Retrieve(ctx, t, rt.TempDir, rt.RootCid, rt.PieceCid, true, nil) + outPath := rt.BoostAndMiner2.Retrieve( + ctx, + t, + trustlessutils.Request{Root: rt.RootCid, Scope: trustlessutils.DagScopeAll}, + true, + ) t.Logf("retrieval is done, compare in- and out- files in: %s, out: %s", rt.SampleFilePath, outPath) kit.AssertFilesEqual(t, rt.SampleFilePath, outPath) diff --git a/markets/utils/selectors.go b/markets/utils/selectors.go index e1009d1ff..ec6a0426d 100644 --- a/markets/utils/selectors.go +++ b/markets/utils/selectors.go @@ -7,20 +7,41 @@ import ( "io" // must be imported to init() raw-codec support + dagpb "github.com/ipld/go-codec-dagpb" _ "github.com/ipld/go-ipld-prime/codec/raw" + "github.com/ipld/go-ipld-prime/linking" + "github.com/ipld/go-ipld-prime/node/basicnode" "github.com/ipfs/go-cid" mdagipld "github.com/ipfs/go-ipld-format" "github.com/ipfs/go-unixfsnode" - dagpb "github.com/ipld/go-codec-dagpb" "github.com/ipld/go-ipld-prime" cidlink "github.com/ipld/go-ipld-prime/linking/cid" - basicnode "github.com/ipld/go-ipld-prime/node/basic" "github.com/ipld/go-ipld-prime/traversal" "github.com/ipld/go-ipld-prime/traversal/selector" selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" ) +func CreateLinkSystem(ds mdagipld.DAGService) linking.LinkSystem { + // this is how we implement GETs + linkSystem := cidlink.DefaultLinkSystem() + linkSystem.StorageReadOpener = func(lctx ipld.LinkContext, lnk ipld.Link) (io.Reader, error) { + cl, isCid := lnk.(cidlink.Link) + if !isCid { + return nil, fmt.Errorf("unexpected link type %#v", lnk) + } + + node, err := ds.Get(lctx.Ctx, cl.Cid) + if err != nil { + return nil, 
err + } + + return bytes.NewBuffer(node.RawData()), nil + } + unixfsnode.AddUnixFSReificationToLinkSystem(&linkSystem) + return linkSystem +} + func TraverseDag( ctx context.Context, ds mdagipld.DAGService, @@ -38,9 +59,8 @@ func TraverseDag( return err } - // not sure what this is for TBH: we also provide ctx in &traversal.Config{} linkContext := ipld.LinkContext{Ctx: ctx} - + linkSystem := CreateLinkSystem(ds) // this is what allows us to understand dagpb nodePrototypeChooser := dagpb.AddSupportToChooser( func(ipld.Link, ipld.LinkContext) (ipld.NodePrototype, error) { @@ -48,23 +68,6 @@ func TraverseDag( }, ) - // this is how we implement GETs - linkSystem := cidlink.DefaultLinkSystem() - linkSystem.StorageReadOpener = func(lctx ipld.LinkContext, lnk ipld.Link) (io.Reader, error) { - cl, isCid := lnk.(cidlink.Link) - if !isCid { - return nil, fmt.Errorf("unexpected link type %#v", lnk) - } - - node, err := ds.Get(lctx.Ctx, cl.Cid) - if err != nil { - return nil, err - } - - return bytes.NewBuffer(node.RawData()), nil - } - unixfsnode.AddUnixFSReificationToLinkSystem(&linkSystem) - // this is how we pull the start node out of the DS startLink := cidlink.Link{Cid: startFrom} startNodePrototype, err := nodePrototypeChooser(startLink, linkContext) diff --git a/retrievalmarket/client/client.go b/retrievalmarket/client/client.go index 78ca1f28e..23eac8e82 100644 --- a/retrievalmarket/client/client.go +++ b/retrievalmarket/client/client.go @@ -31,6 +31,7 @@ import ( "github.com/ipfs/go-datastore" logging "github.com/ipfs/go-log/v2" "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec/dagcbor" selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" "github.com/libp2p/go-libp2p/core/host" inet "github.com/libp2p/go-libp2p/core/network" @@ -160,10 +161,27 @@ func NewClientWithConfig(cfg *Config) (*Client, error) { } } + errCh := make(chan error) + startedCh := make(chan struct{}) + + mgr.OnReady(func(err error) { + if err != nil { + errCh <- err + return + } + close(startedCh) + }) + if err := mgr.Start(context.Background()); err != nil { return nil, err } + select { + case <-startedCh: + case err := <-errCh: + return nil, err + } + c := &Client{ host: cfg.Host, api: cfg.Api, @@ -537,7 +555,15 @@ func (c *Client) retrieveContentFromPeerWithProgressCallback( defer unsubscribe() // Submit the retrieval deal proposal to the miner - newchid, err := c.dataTransfer.OpenPullDataChannel(ctx, peerID, proposal, proposal.PayloadCID, selectorparse.CommonSelector_ExploreAllRecursively) + selector := selectorparse.CommonSelector_ExploreAllRecursively + if proposal.SelectorSpecified() { + var err error + selector, err = ipld.Decode(proposal.Selector.Raw, dagcbor.Decode) + if err != nil { + return nil, fmt.Errorf("failed to decode selector from proposal: %w", err) + } + } + newchid, err := c.dataTransfer.OpenPullDataChannel(ctx, peerID, proposal, proposal.PayloadCID, selector) if err != nil { // We could fail before a successful proposal // publish event failure From d9f8fa076755143d3820600484b396a30cbc4d67 Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Tue, 19 Dec 2023 13:50:36 +0400 Subject: [PATCH 20/34] fix lint errors --- retrievalmarket/server/gsunpaidretrieval.go | 10 ++-- .../types/legacyretrievaltypes/types.go | 46 ---------------- .../types/legacyretrievaltypes/types.ipldsch | 52 ------------------- 3 files changed, 5 insertions(+), 103 deletions(-) delete mode 100644 retrievalmarket/types/legacyretrievaltypes/types.ipldsch diff --git 
a/retrievalmarket/server/gsunpaidretrieval.go b/retrievalmarket/server/gsunpaidretrieval.go index 2adec3999..5ebea3236 100644 --- a/retrievalmarket/server/gsunpaidretrieval.go +++ b/retrievalmarket/server/gsunpaidretrieval.go @@ -280,11 +280,11 @@ func (g *GraphsyncUnpaidRetrieval) interceptRetrieval(p peer.ID, request graphsy proposal := *v log.Debugw("intercepting retrieval deal", "proposal", proposal) return g.handleRetrievalDeal(p, msg, proposal, request, RetrievalTypeDeal) - //case *migrations.DealProposal0: - // // This is a retrieval deal with an older format - // proposal := migrations.MigrateDealProposal0To1(*v) - // log.Debugw("intercepting retrieval deal v1", "proposal", proposal) - // return g.handleRetrievalDeal(p, msg, proposal, request, RetrievalTypeDeal) + case *migrations.DealProposal0: + // This is a retrieval deal with an older format + proposal := migrations.MigrateDealProposal0To1(*v) + log.Debugw("intercepting retrieval deal v1", "proposal", proposal) + return g.handleRetrievalDeal(p, msg, proposal, request, RetrievalTypeDeal) } log.Debugw("ignoring request", "request", request) diff --git a/retrievalmarket/types/legacyretrievaltypes/types.go b/retrievalmarket/types/legacyretrievaltypes/types.go index 9938e42a1..51c834bf5 100644 --- a/retrievalmarket/types/legacyretrievaltypes/types.go +++ b/retrievalmarket/types/legacyretrievaltypes/types.go @@ -13,7 +13,6 @@ import ( "github.com/ipld/go-ipld-prime" "github.com/ipld/go-ipld-prime/codec/dagcbor" "github.com/ipld/go-ipld-prime/datamodel" - "github.com/ipld/go-ipld-prime/node/bindnode" bindnoderegistry "github.com/ipld/go-ipld-prime/node/bindnode/registry" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" @@ -29,9 +28,6 @@ import ( //go:generate cbor-gen-for --map-encoding Query QueryResponse DealProposal DealResponse Params QueryParams DealPayment ClientDealState ProviderDealState PaymentInfo RetrievalPeer Ask -//go:embed types.ipldsch -var embedSchema []byte - // QueryProtocolID is the protocol for querying information about retrieval // deal parameters const QueryProtocolID = protocol.ID("/fil/retrieval/qry/1.0.0") @@ -276,13 +272,6 @@ type Params struct { UnsealPrice abi.TokenAmount } -// paramsBindnodeOptions is the bindnode options required to convert custom -// types used by the Param type -var paramsBindnodeOptions = []bindnode.Option{ - CborGenCompatibleNodeBindnodeOption, - TokenAmountBindnodeOption, -} - func (p Params) SelectorSpecified() bool { return p.Selector != nil && !bytes.Equal(p.Selector.Raw, cbg.CborNull) } @@ -367,11 +356,6 @@ var DealProposalUndefined = DealProposal{} // DealProposalType is the DealProposal voucher type const DealProposalType = datatransfer.TypeIdentifier("RetrievalDealProposal/1") -// dealProposalBindnodeOptions is the bindnode options required to convert -// custom types used by the DealProposal type; the only custom types involved -// are for Params so we can reuse those options. 
-var dealProposalBindnodeOptions = paramsBindnodeOptions - func DealProposalFromNode(node datamodel.Node) (*DealProposal, error) { if node == nil { return nil, fmt.Errorf("empty voucher") @@ -548,35 +532,5 @@ var BindnodeRegistry = bindnoderegistry.NewRegistry() // DealResponseType is the DealResponse usable as a voucher type const DealResponseType = datatransfer.TypeIdentifier("RetrievalDealResponse/1") -// dealResponseBindnodeOptions is the bindnode options required to convert custom -// types used by the DealResponse type -var dealResponseBindnodeOptions = []bindnode.Option{TokenAmountBindnodeOption} - // DealPaymentType is the DealPayment voucher type const DealPaymentType = datatransfer.TypeIdentifier("RetrievalDealPayment/1") - -// dealPaymentBindnodeOptions is the bindnode options required to convert custom -// types used by the DealPayment type -var dealPaymentBindnodeOptions = []bindnode.Option{ - SignatureBindnodeOption, - AddressBindnodeOption, - BigIntBindnodeOption, - TokenAmountBindnodeOption, -} - -//func init() { -// for _, r := range []struct { -// typ interface{} -// typName string -// opts []bindnode.Option -// }{ -// {(*Params)(nil), "Params", paramsBindnodeOptions}, -// {(*DealProposal)(nil), "DealProposal", dealProposalBindnodeOptions}, -// {(*DealResponse)(nil), "DealResponse", dealResponseBindnodeOptions}, -// {(*DealPayment)(nil), "DealPayment", dealPaymentBindnodeOptions}, -// } { -// if err := BindnodeRegistry.RegisterType(r.typ, string(embedSchema), r.typName, r.opts...); err != nil { -// panic(err.Error()) -// } -// } -//} diff --git a/retrievalmarket/types/legacyretrievaltypes/types.ipldsch b/retrievalmarket/types/legacyretrievaltypes/types.ipldsch deleted file mode 100644 index dbbc81b6b..000000000 --- a/retrievalmarket/types/legacyretrievaltypes/types.ipldsch +++ /dev/null @@ -1,52 +0,0 @@ -type Params struct { - Selector nullable Any # CborGenCompatibleNode - PieceCID nullable &Any - PricePerByte Bytes # abi.TokenAmount - PaymentInterval Int - PaymentIntervalIncrease Int - UnsealPrice Bytes # abi.TokenAmount -} - -type DealProposal struct { - PayloadCID &Any - ID Int # DealID - Params Params -} - -type DealResponse struct { - Status Int - ID Int - PaymentOwed Bytes - Message String -} - -type DealPayment struct { - ID Int # DealID - PaymentChannel Bytes # address.Address - PaymentVoucher nullable SignedVoucher -} - -type SignedVoucher struct { - ChannelAddr Bytes # addr.Address - TimeLockMin Int # abi.ChainEpoch - TimeLockMax Int # abi.ChainEpoch - SecretHash Bytes - Extra nullable ModVerifyParams - Lane Int - Nonce Int - Amount Bytes # big.Int - MinSettleHeight Int # abi.ChainEpoch - Merges [Merge] - Signature nullable Bytes # crypto.Signature -} representation tuple - -type ModVerifyParams struct { - Actor Bytes # addr.Address - Method Int # abi.MethodNum - Data Bytes -} representation tuple - -type Merge struct { - Lane Int - Nonce Int -} representation tuple \ No newline at end of file From 28a31a1dab66edd6b538309a38ae8277ed264513 Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Tue, 19 Dec 2023 15:08:15 +0400 Subject: [PATCH 21/34] fix go mod --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index f60c8ac3c..7d2bf2ceb 100644 --- a/go.mod +++ b/go.mod @@ -333,6 +333,7 @@ require ( github.com/ipni/ipni-cli v0.1.1 github.com/ipni/storetheindex v0.8.1 github.com/schollz/progressbar/v3 v3.14.1 + go.uber.org/multierr v1.11.0 ) require ( @@ -375,7 +376,6 @@ require ( github.com/yugabyte/pgx/v4 v4.14.5 // indirect 
github.com/zyedidia/generic v1.2.1 // indirect go.opentelemetry.io/otel/metric v1.16.0 // indirect - go.uber.org/multierr v1.11.0 // indirect gonum.org/v1/gonum v0.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect ) From 7afb77d4b25c904b896fa14017a4a163c8b43f5e Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Tue, 19 Dec 2023 15:38:05 +0400 Subject: [PATCH 22/34] cleanup itest fixtures, fix gql --- extern/filecoin-ffi | 2 +- gql/module.go | 6 +- itests/data_segment_index_retrieval_test.go | 28 +--- itests/framework/fixtures.go | 164 -------------------- 4 files changed, 7 insertions(+), 193 deletions(-) delete mode 100644 itests/framework/fixtures.go diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index bf5edd551..441fa8e61 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit bf5edd551d23901fa565aac4ce94433afe0c278e +Subproject commit 441fa8e61189dc32c2960c1f8d8ba56269f20366 diff --git a/gql/module.go b/gql/module.go index c4e1134f1..efe888cf7 100644 --- a/gql/module.go +++ b/gql/module.go @@ -27,15 +27,15 @@ import ( "go.uber.org/fx" ) -func NewGraphqlServer(cfg *config.Boost) func(lc fx.Lifecycle, r repo.LockedRepo, h host.Host, prov *storagemarket.Provider, dealsDB *db.DealsDB, logsDB *db.LogsDB, retDB *rtvllog.RetrievalLogDB, plDB *db.ProposalLogsDB, fundsDB *db.FundsDB, fundMgr *fundmanager.FundManager, storageMgr *storagemanager.StorageManager, publisher *storageadapter.DealPublisher, spApi sealingpipeline.API, legacyDeals legacy.LegacyDealManager, piecedirectory *piecedirectory.PieceDirectory, indexProv provider.Interface, idxProvWrapper *indexprovider.Wrapper, fullNode v1api.FullNode, bg BlockGetter, ssm *sectorstatemgr.SectorStateMgr, mpool *mpoolmonitor.MpoolMonitor, mma *lib.MultiMinerAccessor, sask storedask.StoredAsk) *Server { - return func(lc fx.Lifecycle, r repo.LockedRepo, h host.Host, prov *storagemarket.Provider, dealsDB *db.DealsDB, logsDB *db.LogsDB, retDB *rtvllog.RetrievalLogDB, plDB *db.ProposalLogsDB, fundsDB *db.FundsDB, fundMgr *fundmanager.FundManager, +func NewGraphqlServer(cfg *config.Boost) func(lc fx.Lifecycle, r repo.LockedRepo, h host.Host, prov *storagemarket.Provider, ddProv *storagemarket.DirectDealsProvider, dealsDB *db.DealsDB, directDealsDB *db.DirectDealsDB, logsDB *db.LogsDB, retDB *rtvllog.RetrievalLogDB, plDB *db.ProposalLogsDB, fundsDB *db.FundsDB, fundMgr *fundmanager.FundManager, storageMgr *storagemanager.StorageManager, publisher *storageadapter.DealPublisher, spApi sealingpipeline.API, legacyDeals legacy.LegacyDealManager, piecedirectory *piecedirectory.PieceDirectory, indexProv provider.Interface, idxProvWrapper *indexprovider.Wrapper, fullNode v1api.FullNode, bg BlockGetter, ssm *sectorstatemgr.SectorStateMgr, mpool *mpoolmonitor.MpoolMonitor, mma *lib.MultiMinerAccessor, sask storedask.StoredAsk) *Server { + return func(lc fx.Lifecycle, r repo.LockedRepo, h host.Host, prov *storagemarket.Provider, ddProv *storagemarket.DirectDealsProvider, dealsDB *db.DealsDB, directDealsDB *db.DirectDealsDB, logsDB *db.LogsDB, retDB *rtvllog.RetrievalLogDB, plDB *db.ProposalLogsDB, fundsDB *db.FundsDB, fundMgr *fundmanager.FundManager, storageMgr *storagemanager.StorageManager, publisher *storageadapter.DealPublisher, spApi sealingpipeline.API, legacyDeals legacy.LegacyDealManager, piecedirectory *piecedirectory.PieceDirectory, indexProv provider.Interface, idxProvWrapper *indexprovider.Wrapper, fullNode v1api.FullNode, bg BlockGetter, ssm *sectorstatemgr.SectorStateMgr, mpool *mpoolmonitor.MpoolMonitor, mma 
*lib.MultiMinerAccessor, sask storedask.StoredAsk) *Server { resolverCtx, cancel := context.WithCancel(context.Background()) - resolver := NewResolver(resolverCtx, cfg, r, h, dealsDB, logsDB, retDB, plDB, fundsDB, fundMgr, storageMgr, spApi, prov, legacyDeals, piecedirectory, publisher, indexProv, idxProvWrapper, fullNode, ssm, mpool, mma, sask) + resolver := NewResolver(resolverCtx, cfg, r, h, dealsDB, directDealsDB, logsDB, retDB, plDB, fundsDB, fundMgr, storageMgr, spApi, prov, ddProv, legacyDeals, piecedirectory, publisher, indexProv, idxProvWrapper, fullNode, ssm, mpool, mma, sask) svr := NewServer(cfg, resolver, bg) lc.Append(fx.Hook{ diff --git a/itests/data_segment_index_retrieval_test.go b/itests/data_segment_index_retrieval_test.go index fe8c122f2..ed03c2705 100644 --- a/itests/data_segment_index_retrieval_test.go +++ b/itests/data_segment_index_retrieval_test.go @@ -13,6 +13,7 @@ import ( "github.com/google/uuid" "github.com/ipfs/go-cid" carv2 "github.com/ipld/go-car/v2" + trustlessutils "github.com/ipld/go-trustless-utils" "github.com/stretchr/testify/require" ) @@ -39,21 +40,6 @@ func TestDataSegmentIndexRetrieval(t *testing.T) { err = f.AddClientProviderBalance(abi.NewTokenAmount(1e15)) require.NoError(t, err) - //// Create a CAR file - //tempdir := t.TempDir() - //log.Debugw("using tempdir", "dir", tempdir) - // - //// Select the number of car segments to use in test - //seg := 2 - // - //// Generate car file containing multiple car files - //segmentDetails, err := framework.GenerateDataSegmentFiles(t, tempdir, seg) - //require.NoError(t, err) - // - //p := segmentDetails.Piece.PieceCID.String() - // - //log.Info(p) - // Start a web server to serve the car files log.Debug("starting webserver") server, err := testutil.HttpTestFileServer(t, "fixtures") @@ -78,27 +64,19 @@ func TestDataSegmentIndexRetrieval(t *testing.T) { err = f.WaitForDealAddedToSector(dealUuid) require.NoError(t, err) - ////Retrieve and compare the all car files within the deal - //for i := 0; i < seg; i++ { - // for _, r := range segmentDetails.Segments[i].Root { - // outFile := f.RetrieveDirect(ctx, t, r, &res.DealParams.ClientDealProposal.Proposal.PieceCID, true) - // kit.AssertFilesEqual(t, segmentDetails.Segments[i].FilePath, outFile) - // } - //} - r1, err := cid.Parse("bafykbzaceaqliwrg6y2bxrhhbbiz3nknhz43yj2bqog4rulu5km5qhkckffuw") require.NoError(t, err) r2, err := cid.Parse("bafykbzaceccq64xf6yadlbmqpfindtf5x3cssel2fozkhvdyrrtnjnutr5j52") require.NoError(t, err) - outF1 := f.RetrieveDirect(ctx, t, r1, &pieceCid, false, nil) + outF1 := f.Retrieve(ctx, t, trustlessutils.Request{Root: r1, Scope: trustlessutils.DagScopeAll}, false) r, err := carv2.OpenReader(outF1) require.NoError(t, err) rs, err := r.Roots() require.NoError(t, err) require.Equal(t, r1, rs[0]) r.Close() - outf2 := f.RetrieveDirect(ctx, t, r2, &pieceCid, false, nil) + outf2 := f.Retrieve(ctx, t, trustlessutils.Request{Root: r2, Scope: trustlessutils.DagScopeAll}, false) r, err = carv2.OpenReader(outf2) require.NoError(t, err) rs, err = r.Roots() diff --git a/itests/framework/fixtures.go b/itests/framework/fixtures.go deleted file mode 100644 index 1aaa8df82..000000000 --- a/itests/framework/fixtures.go +++ /dev/null @@ -1,164 +0,0 @@ -package framework - -import ( - "errors" - "io" - "math/bits" - "os" - "testing" - - "github.com/filecoin-project/boost/storagemarket" - "github.com/filecoin-project/go-data-segment/datasegment" - commcid "github.com/filecoin-project/go-fil-commcid" - commp 
"github.com/filecoin-project/go-fil-commp-hashhash" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/itests/kit" - "github.com/ipfs/go-cid" - "github.com/ipld/go-car/v2" -) - -type CarDetails struct { - CarPath string - Root []cid.Cid - FilePath string -} - -type SegmentDetails struct { - Piece *abi.PieceInfo - Segments []*CarDetails - CarPath string - CarSize int64 -} - -func GenerateDataSegmentFiles(t *testing.T, tmpdir string, num int) (SegmentDetails, error) { - if num < 2 { - return SegmentDetails{}, errors.New("at least 2 deals are required to test data segment index") - } - - fileSize := 1572864 - - var cars []*CarDetails - for i := 1; i <= num; i++ { - - carPath, filePath := kit.CreateRandomCARv1(t, i, fileSize) - rd, err := car.OpenReader(carPath) - if err != nil { - return SegmentDetails{}, err - } - - roots, err := rd.Roots() - if err != nil { - return SegmentDetails{}, err - } - - err = rd.Close() - if err != nil { - return SegmentDetails{}, err - } - - cars = append(cars, &CarDetails{ - CarPath: carPath, - FilePath: filePath, - Root: roots, - }) - } - - finalCar, err := os.CreateTemp(tmpdir, "finalcar") - if err != nil { - return SegmentDetails{}, err - } - - err = generateDataSegmentCar(cars, finalCar) - if err != nil { - return SegmentDetails{}, err - } - - finalCarName := finalCar.Name() - carStat, err := finalCar.Stat() - if err != nil { - return SegmentDetails{}, err - } - carSize := carStat.Size() - err = finalCar.Close() - if err != nil { - return SegmentDetails{}, err - } - - cidAndSize, err := storagemarket.GenerateCommPLocally(finalCarName) - if err != nil { - return SegmentDetails{}, err - } - - return SegmentDetails{ - Piece: cidAndSize, - Segments: cars, - CarPath: finalCarName, - CarSize: carSize, - }, nil -} - -func generateDataSegmentCar(cars []*CarDetails, outputFile *os.File) error { - - readers := make([]io.Reader, 0) - deals := make([]abi.PieceInfo, 0) - - for _, cf := range cars { - - r, err := os.Open(cf.CarPath) - - if err != nil { - return err - } - - readers = append(readers, r) - cp := new(commp.Calc) - - _, err = io.Copy(cp, r) - if err != nil { - return err - } - - rawCommP, size, err := cp.Digest() - if err != nil { - return err - } - - _, err = r.Seek(0, io.SeekStart) - if err != nil { - return err - } - - c, _ := commcid.DataCommitmentV1ToCID(rawCommP) - - subdeal := abi.PieceInfo{ - Size: abi.PaddedPieceSize(size), - PieceCID: c, - } - deals = append(deals, subdeal) - } - - _, size, err := datasegment.ComputeDealPlacement(deals) - if err != nil { - return err - } - - overallSize := abi.PaddedPieceSize(size) - // we need to make this the 'next' power of 2 in order to have space for the index - next := 1 << (64 - bits.LeadingZeros64(uint64(overallSize+256))) - - a, err := datasegment.NewAggregate(abi.PaddedPieceSize(next), deals) - if err != nil { - return err - } - out, err := a.AggregateObjectReader(readers) - if err != nil { - return err - } - - _, err = io.Copy(outputFile, out) - if err != nil { - return err - } - - return nil -} From 370786d5b5a42f4033e5509e867a8a4590eddff0 Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Tue, 19 Dec 2023 15:45:33 +0400 Subject: [PATCH 23/34] go mod tidy --- go.sum | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/go.sum b/go.sum index 7278a3b8d..061b3b9d8 100644 --- a/go.sum +++ b/go.sum @@ -380,7 +380,7 @@ github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go github.com/filecoin-project/go-state-types v0.1.0/go.mod 
h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.1.6/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= github.com/filecoin-project/go-state-types v0.1.10/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= -github.com/filecoin-project/go-state-types v0.11.1/go.mod h1:SyNPwTsU7I22gL2r0OAPcImvLoTVfgRwdK/Y5rR1zz8= +github.com/filecoin-project/go-state-types v0.11.2-0.20230712101859-8f37624fa540/go.mod h1:SyNPwTsU7I22gL2r0OAPcImvLoTVfgRwdK/Y5rR1zz8= github.com/filecoin-project/go-state-types v0.12.8 h1:W/UObdAsv+LbB9EfyLg92DSYoatzUWmlfV8FGyh30VA= github.com/filecoin-project/go-state-types v0.12.8/go.mod h1:gR2NV0CSGSQwopxF+3In9nDh1sqvoYukLcs5vK0AHCA= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= @@ -2503,6 +2503,7 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= From f1b93de2d2cccb8cdc992996620317b4cfe5f3db Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Tue, 19 Dec 2023 16:07:44 +0400 Subject: [PATCH 24/34] fix directDealProv --- cmd/booster-http/piecehandler.go | 4 +- go.mod | 2 - go.sum | 4 - go.work.sum | 218 +----------------------- indexprovider/mock/mock.go | 273 ------------------------------- node/modules/directdeals.go | 6 +- 6 files changed, 5 insertions(+), 502 deletions(-) delete mode 100644 indexprovider/mock/mock.go diff --git a/cmd/booster-http/piecehandler.go b/cmd/booster-http/piecehandler.go index 005c20551..f71fe040a 100644 --- a/cmd/booster-http/piecehandler.go +++ b/cmd/booster-http/piecehandler.go @@ -11,10 +11,10 @@ import ( "time" "github.com/NYTimes/gziphandler" - "github.com/filecoin-project/boost-gfm/retrievalmarket" "github.com/filecoin-project/boost/extern/boostd-data/model" "github.com/filecoin-project/boost/extern/boostd-data/shared/tracing" "github.com/filecoin-project/boost/metrics" + "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" "github.com/hashicorp/go-multierror" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" @@ -143,7 +143,7 @@ func isNotFoundError(err error) bool { switch { case errors.Is(err, ErrNotFound), errors.Is(err, datastore.ErrNotFound), - errors.Is(err, retrievalmarket.ErrNotFound), + errors.Is(err, legacyretrievaltypes.ErrNotFound), strings.Contains(strings.ToLower(err.Error()), "not found"): return true default: diff --git a/go.mod b/go.mod index 7d2bf2ceb..860dc0728 100644 --- a/go.mod +++ b/go.mod @@ -317,7 +317,6 @@ require ( ) require ( - github.com/filecoin-project/boost-gfm v1.26.7 github.com/filecoin-project/boost-graphsync v0.13.9 github.com/filecoin-project/boost/extern/boostd-data v0.0.0-20231124125934-3233c510357f github.com/filecoin-project/go-data-segment v0.0.1 @@ -340,7 +339,6 @@ require ( github.com/Jorropo/jsync v1.0.1 // indirect github.com/alexbrainman/goissue34681 
v0.0.0-20191006012335-3fc7a47baff5 // indirect github.com/fatih/color v1.13.0 // indirect - github.com/filecoin-project/go-data-transfer v1.15.4-boost // indirect github.com/filecoin-project/go-data-transfer/v2 v2.0.0-rc7 // indirect github.com/filecoin-project/go-fil-markets v1.28.3 // indirect github.com/filecoin-project/kubo-api-client v0.0.2-0.20230829103503-14448166d14d // indirect diff --git a/go.sum b/go.sum index 061b3b9d8..7d2da3cf0 100644 --- a/go.sum +++ b/go.sum @@ -309,8 +309,6 @@ github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGj github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= -github.com/filecoin-project/boost-gfm v1.26.7 h1:ENJEqx1OzY072QnUP37YrGVmUiCewRwHAjbtTxyW74Y= -github.com/filecoin-project/boost-gfm v1.26.7/go.mod h1:OhG2y7WeDx3KU9DPjgWllS+3/ospPjm8/XDrvN6uOfk= github.com/filecoin-project/boost-graphsync v0.13.9 h1:RQepfTlffLGUmp3Ff7VosYrWUKPLiz++GGV2D/gIfuw= github.com/filecoin-project/boost-graphsync v0.13.9/go.mod h1:bc2M5ZLZJtXHl8kjnqtn4L1MsdEqpJErDaIeY0bJ9wk= github.com/filecoin-project/boost/extern/boostd-data v0.0.0-20231124125934-3233c510357f h1:8dd0yAadyeOL5Qd42XhEwD60UKvIFkY2MLhef/IaeOk= @@ -346,8 +344,6 @@ github.com/filecoin-project/go-crypto v0.0.1 h1:AcvpSGGCgjaY8y1az6AMfKQWreF/pWO2 github.com/filecoin-project/go-crypto v0.0.1/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= github.com/filecoin-project/go-data-segment v0.0.1 h1:1wmDxOG4ubWQm3ZC1XI5nCon5qgSq7Ra3Rb6Dbu10Gs= github.com/filecoin-project/go-data-segment v0.0.1/go.mod h1:H0/NKbsRxmRFBcLibmABv+yFNHdmtl5AyplYLnb0Zv4= -github.com/filecoin-project/go-data-transfer v1.15.4-boost h1:rGsPDeDk0nbzLOPn/9iCIrhLNy69Vkr9tRBcetM4kd0= -github.com/filecoin-project/go-data-transfer v1.15.4-boost/go.mod h1:S5Es9uoD+3TveYyGjxZInAF6mSQtRjNzezV7Y7Sh8X0= github.com/filecoin-project/go-data-transfer/v2 v2.0.0-rc7 h1:v+zJS5B6pA3ptWZS4t8tbt1Hz9qENnN4nVr1w99aSWc= github.com/filecoin-project/go-data-transfer/v2 v2.0.0-rc7/go.mod h1:V3Y4KbttaCwyg1gwkP7iai8CbQx4mZUGjd3h9GZWLKE= github.com/filecoin-project/go-ds-versioning v0.1.2 h1:to4pTadv3IeV1wvgbCbN6Vqd+fu+7tveXgv/rCEZy6w= diff --git a/go.work.sum b/go.work.sum index 42397c319..4e681419e 100644 --- a/go.work.sum +++ b/go.work.sum @@ -649,6 +649,7 @@ github.com/filecoin-project/boost/extern/boostd-data v0.0.0-20231009154452-ca8da github.com/filecoin-project/filecoin-ffi v0.30.4-0.20220519234331-bfd1f5f9fe38/go.mod h1:GM5pXRYvQM7wyH6V2WtPnJ2k1jt+qotRkWLxBSRCOuE= github.com/filecoin-project/go-dagaggregator-unixfs v0.3.0 h1:UXLtBUnPa61LkNa2GqhP+aJ53bOnHP/dzg6/wk2rnsA= github.com/filecoin-project/go-dagaggregator-unixfs v0.3.0/go.mod h1:UTWmEgyqq7RMx56AeHY/uEoLq1dJTPAirjyBPas4IQQ= +github.com/filecoin-project/go-data-transfer v1.15.2 h1:PzqsFr2Q/onMGKrGh7TtRT0dKsJcVJrioJJnjnKmxlk= github.com/filecoin-project/go-indexer-core v0.2.16 h1:1SmJVhfHTsi0CC+U6JdyjIIQtOqmKvCl/tqpI3gI+18= github.com/filecoin-project/go-legs v0.4.9 h1:9ccbv5zDPqMviEpSpf0TdfKKI64TMYGSiuY2A1EXHFY= github.com/filecoin-project/go-retrieval-types v1.2.0/go.mod h1:ojW6wSw2GPyoRDBGqw1K6JxUcbfa5NOSIiyQEeh7KK0= @@ -894,7 +895,6 @@ github.com/ipfs/go-ipns v0.3.0 h1:ai791nTgVo+zTuq2bLvEGmWP1M0A6kGTXUsgv/Yq67A= github.com/ipfs/go-ipns v0.3.0/go.mod h1:3cLT2rbvgPZGkHJoPO1YMJeh6LtkxopCkKFcio/wE24= github.com/ipfs/go-libipfs v0.4.0/go.mod 
h1:XsU2cP9jBhDrXoJDe0WxikB8XcVmD3k2MEZvB3dbYu8= github.com/ipfs/go-libipfs v0.6.0/go.mod h1:UjjDIuehp2GzlNP0HEr5I9GfFT7zWgst+YfpUEIThtw= -github.com/ipfs/go-libipfs v0.7.0/go.mod h1:KsIf/03CqhICzyRGyGo68tooiBE2iFbI/rXW7FhAYr0= github.com/ipfs/go-merkledag v0.9.0/go.mod h1:bPHqkHt5OZ0p1n3iqPeDiw2jIBkjAytRjS3WSBwjq90= github.com/ipfs/go-merkledag v0.10.0/go.mod h1:zkVav8KiYlmbzUzNM6kENzkdP5+qR7+2mCwxkQ6GIj8= github.com/ipfs/go-metrics-prometheus v0.0.2 h1:9i2iljLg12S78OhC6UAiXi176xvQGiZaGVF1CUVdE+s= @@ -929,20 +929,11 @@ github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236 github.com/ipld/go-ipld-prime/storage/dsadapter v0.0.0-20230102063945-1a409dc236dd h1:qdjo1CRvAQhOMoyYjPnbdZ5rYFFmqztweQ9KAsuWpO0= github.com/ipld/go-ipld-prime/storage/dsadapter v0.0.0-20230102063945-1a409dc236dd/go.mod h1:9DD/GM0JNPoisgR09F62kbBi7kHa4eDIea4XshXYOVc= github.com/ipld/go-storethehash v0.1.7 h1:c54J7WTBAjKfnSMC4TL7RLFNIY5ws40IzljKKW8zUAw= -github.com/ipld/ipld/specs v0.0.0-20231012031213-54d3b21deda4/go.mod h1:WcT0DfRe+e2QFY0kcbsOnuT6jL5Q0JNZ83I5DHIdStg= github.com/ipni/go-indexer-core v0.8.0 h1:HPFMngR47FL49mVnOZBrcxJoRODjIadlP+UYMRboNKA= github.com/ipni/go-indexer-core v0.8.0/go.mod h1:Y9su+no9k6y+jnQRERP/CKJewdISHzzl+n91GA+y4Ao= github.com/ipni/go-libipni v0.0.8/go.mod h1:paYP9U4N3/vOzGCuN9kU972vtvw9JUcQjOKyiCFGwRk= github.com/ipni/go-libipni v0.5.0/go.mod h1:UnrhEqjVI2Z2HXlaieOBONJmtW557nZkYpB4IIsMD+s= -github.com/ipni/go-libipni v0.5.2 h1:9vaYOnR4dskd8p88NOboqI6yVqBwYPNCQ/zOaRSr59I= -github.com/ipni/go-libipni v0.5.2/go.mod h1:UnrhEqjVI2Z2HXlaieOBONJmtW557nZkYpB4IIsMD+s= github.com/ipni/index-provider v0.12.0/go.mod h1:GhyrADJp7n06fqoc1djzkvL4buZYHzV8SoWrlxEo5F4= -github.com/ipni/index-provider v0.14.2 h1:daA3IFnI2n2x/mL0K91SQHNLq6Vvfp5q4uFX9G4glvE= -github.com/ipni/index-provider v0.14.2/go.mod h1:mArx7Ou3Y62fIDSj9a1Neh5G14xQcwXGbfEbf47vyuM= -github.com/ipni/ipni-cli v0.1.1 h1:TjYAf5CrVx/loQtWQnwEnIYjW7hvRJDRyIibT7WbHjE= -github.com/ipni/ipni-cli v0.1.1/go.mod h1:TAOkJwc9OBsx5gRy8iyoWgb8AbtJcT482cJUGDTxnHg= -github.com/ipni/storetheindex v0.8.1 h1:3uHclkcQWlIXQx+We4tbGF/XzoZYERz3so34xQbUmZE= -github.com/ipni/storetheindex v0.8.1/go.mod h1:K4AR2bRll46YCWeGvob5foN/Z/kuovPdlUeJKOHVQHo= github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= github.com/jackc/pgx/v5 v5.4.1 h1:oKfB/FhuVtit1bBM3zNRRsZ925ZkMN3HXL+LgLUM9lE= @@ -962,19 +953,12 @@ github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJk github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e h1:UvSe12bq+Uj2hWd8aOlwPmoZ+CITRFrdit+sDGfAg8U= github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1 h1:ujPKutqRlJtcfWk6toYVYagwra7HQHbXOaS171b4Tg8= -github.com/jellydator/ttlcache/v2 v2.11.1 h1:AZGME43Eh2Vv3giG6GeqeLeFXxwxn1/qHItqWZl6U64= -github.com/jellydator/ttlcache/v2 v2.11.1/go.mod h1:RtE5Snf0/57e+2cLWFYWCCsLas2Hy3c5Z4n14XmSvTI= -github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA= -github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 
h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/jonboulle/clockwork v0.3.0 h1:9BSCMi8C+0qdApAp4auwX0RkLGUjs956h0EkuQymUhg= github.com/jonboulle/clockwork v0.3.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jrick/logrotate v1.0.0 h1:lQ1bL/n9mBNeIXoTUoYRlK4dHuNJVofX9oWqBtPnSzI= github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4 h1:nwOc1YaOrYJ37sEBrtWZrdqzK22hiJs3GpDmP3sR2Yw= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -985,36 +969,27 @@ github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+ github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5 h1:PJr+ZMXIecYc1Ey2zucXdR73SMBtgjPgwa31099IMv0= github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef h1:2jNeR4YUziVtswNP9sEFAI913cVrzH85T+8Q6LpYbT0= github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213 h1:qGQQKEcAR99REcMpsXCp3lJ03zYT1PkRd3kQGPn9GVg= -github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213/go.mod h1:vNUNkEQ1e29fT/6vq2aBdFsgNPmy8qMdSay1npru+Sw= github.com/kabukky/httpscerts v0.0.0-20150320125433-617593d7dcb3 h1:Iy7Ifq2ysilWU4QlCx/97OoI4xT1IV7i8byT/EyIT/M= github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d h1:cVtBfNW5XTHiKQe7jDaDBSh/EVM4XLPutLAGboIXuM0= github.com/karalabe/usb v0.0.2 h1:M6QQBNxF+CQ8OFvxrT90BA0qBOXymndZnk5q235mFc4= github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= github.com/karrick/godirwalk v1.10.12 h1:BqUm+LuJcXjGv1d2mj3gBiQyrQ57a0rYoAmhvJQ7RDU= github.com/karrick/godirwalk v1.10.12/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= -github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= -github.com/kilic/bls12-381 v0.1.0 h1:encrdjqKMEvabVQ7qYOKu1OvhqpK4s47wDYtNiPtlp4= -github.com/kilic/bls12-381 v0.1.0/go.mod h1:vDTTHJONJ6G+P2R74EhnyotQDTliQDnFEwhdmfzw1ig= github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY= github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 h1:FOOIBWrEkLgmlgGfMuZT83xIwfPDxEI2OHu6xUmJMFE= github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4= github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= -github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5 h1:2U0HzY8BJ8hVwDKIzp7y4voR9CX/nvcfymLmg2UiOio= -github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/klauspost/cpuid/v2 v2.1.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= 
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 h1:KAZ1BW2TCmT6PRihDPpocIy1QTtsAsrx6TneU/4+CMg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada h1:3L+neHp83cTjegPdCiOxVOJtRIy7/8RldvMTsyPYH10= github.com/koalacxr/quantile v0.0.1 h1:wAW+SQ286Erny9wOjVww96t8ws+x5Zj6AKHDULUK+o0= github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= -github.com/koron/go-ssdp v0.0.3/go.mod h1:b2MxI6yh02pKrsyNoQUsk4+YNikaGhe4894J+Q5lDvA= -github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= @@ -1033,49 +1008,30 @@ github.com/libp2p/go-conn-security-multistream v0.3.0 h1:9UCIKlBL1hC9u7nkMXpD1nk github.com/libp2p/go-doh-resolver v0.4.0 h1:gUBa1f1XsPwtpE1du0O+nnZCUqtG7oYi7Bb+0S7FQqw= github.com/libp2p/go-doh-resolver v0.4.0/go.mod h1:v1/jwsFusgsWIGX/c6vCRrnJ60x7bhTiq/fs2qt0cAg= github.com/libp2p/go-eventbus v0.2.1 h1:VanAdErQnpTioN2TowqNcOijf6YwhuODe4pPKSDpxGc= -github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= -github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.22.0/go.mod h1:UDolmweypBSjQb2f7xutPnwZ/fxioLbMBxSjRksxxU4= github.com/libp2p/go-libp2p v0.23.4/go.mod h1:s9DEa5NLR4g+LZS+md5uGU4emjMWFiqkZr6hBTY8UxI= github.com/libp2p/go-libp2p v0.25.1/go.mod h1:xnK9/1d9+jeQCVvi/f1g12KqtVi/jP/SijtKV1hML3g= github.com/libp2p/go-libp2p v0.26.3/go.mod h1:x75BN32YbwuY0Awm2Uix4d4KOz+/4piInkp4Wr3yOo8= github.com/libp2p/go-libp2p v0.27.6/go.mod h1:oMfQGTb9CHnrOuSM6yMmyK2lXz3qIhnkn2+oK3B1Y2g= github.com/libp2p/go-libp2p v0.27.7/go.mod h1:oMfQGTb9CHnrOuSM6yMmyK2lXz3qIhnkn2+oK3B1Y2g= -github.com/libp2p/go-libp2p v0.31.0 h1:LFShhP8F6xthWiBBq3euxbKjZsoRajVEyBS9snfHxYg= -github.com/libp2p/go-libp2p v0.31.0/go.mod h1:W/FEK1c/t04PbRH3fA9i5oucu5YcgrG0JVoBWT1B7Eg= -github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w= github.com/libp2p/go-libp2p-autonat v0.7.0 h1:rCP5s+A2dlhM1Xd66wurE0k7S7pPmM0D+FlqqSBXxks= github.com/libp2p/go-libp2p-autonat-svc v0.1.0 h1:28IM7iWMDclZeVkpiFQaWVANwXwE7zLlpbnS7yXxrfs= github.com/libp2p/go-libp2p-blankhost v0.4.0 h1:LXQsrdCsYtmlV07NClqmR/5xPW2JuTD/vC82aaHQ5Y4= github.com/libp2p/go-libp2p-blankhost v0.4.0/go.mod h1:Ugc8dxkVEpcRxUhxDFYITLmu60bN9RabRquN+ZETjEo= github.com/libp2p/go-libp2p-circuit v0.6.0 h1:rw/HlhmUB3OktS/Ygz6+2XABOmHKzZpPUuMNUMosj8w= github.com/libp2p/go-libp2p-connmgr v0.4.0 h1:q/KZUS1iMDIQckMZarMYwhQisJqiFPHAVC1c4DR3hDE= -github.com/libp2p/go-libp2p-consensus v0.0.1 h1:jcVbHRZLwTXU9iT/mPi+Lx4/OrIzq3bU1TbZNhYFCV8= -github.com/libp2p/go-libp2p-consensus v0.0.1/go.mod h1:+9Wrfhc5QOqWB0gXI0m6ARlkHfdJpcFXmRU0WoHz4Mo= -github.com/libp2p/go-libp2p-core v0.19.0/go.mod h1:AkA+FUKQfYt1FLNef5fOPlo/naAWjKy/RCjkcPjqzYg= github.com/libp2p/go-libp2p-core v0.20.0 h1:PGKM74+T+O/FaZNARNW32i90RMBHCcgd/hkum2UQ5eY= github.com/libp2p/go-libp2p-core v0.20.0/go.mod h1:6zR8H7CvQWgYLsbG4on6oLNSGcyKaYFSEYyDt51+bIY= github.com/libp2p/go-libp2p-crypto v0.1.0 h1:k9MFy+o2zGDNGsaoZl0MA3iZ75qXxr9OOoAZF+sD5OQ= github.com/libp2p/go-libp2p-daemon v0.2.2 h1:qKcrv0c4XEcdV9LzY+2IDUxS8xMsTVzxUD1K3i1Bdgg= github.com/libp2p/go-libp2p-discovery v0.7.0 h1:6Iu3NyningTb/BmUnEhcTwzwbs4zcywwbfTulM9LHuc= 
-github.com/libp2p/go-libp2p-gorpc v0.5.0 h1:mmxxAPdP3JzpYH4KcDf4csXnqtd1HazLPfdyB2MBRb8= -github.com/libp2p/go-libp2p-gorpc v0.5.0/go.mod h1:GpHuvY3m0YFkd0+inOGo4HDtc4up9OS/mBPXvEpNuRY= -github.com/libp2p/go-libp2p-gostream v0.6.0 h1:QfAiWeQRce6pqnYfmIVWJFXNdDyfiR/qkCnjyaZUPYU= -github.com/libp2p/go-libp2p-gostream v0.6.0/go.mod h1:Nywu0gYZwfj7Jc91PQvbGU8dIpqbQQkjWgDuOrFaRdA= github.com/libp2p/go-libp2p-host v0.0.3 h1:BB/1Z+4X0rjKP5lbQTmjEjLbDVbrcmLOlA6QDsN5/j4= -github.com/libp2p/go-libp2p-http v0.5.0 h1:+x0AbLaUuLBArHubbbNRTsgWz0RjNTy6DJLOxQ3/QBc= -github.com/libp2p/go-libp2p-http v0.5.0/go.mod h1:glh87nZ35XCQyFsdzZps6+F4HYI6DctVFY5u1fehwSg= github.com/libp2p/go-libp2p-interface-connmgr v0.0.5 h1:KG/KNYL2tYzXAfMvQN5K1aAGTYSYUMJ1prgYa2/JI1E= github.com/libp2p/go-libp2p-interface-pnet v0.0.1 h1:7GnzRrBTJHEsofi1ahFdPN9Si6skwXQE9UqR2S+Pkh8= github.com/libp2p/go-libp2p-kad-dht v0.21.0/go.mod h1:Bhm9diAFmc6qcWAr084bHNL159srVZRKADdp96Qqd1I= github.com/libp2p/go-libp2p-kad-dht v0.23.0/go.mod h1:oO5N308VT2msnQI6qi5M61wzPmJYg7Tr9e16m5n7uDU= github.com/libp2p/go-libp2p-kad-dht v0.24.0/go.mod h1:lfu5T01EH+r6uDZ/8G+ObhwgzVyd0b1nb54AdT8XGhc= -github.com/libp2p/go-libp2p-kad-dht v0.24.2 h1:zd7myKBKCmtZBhI3I0zm8xBkb28v3gmSEtQfBdAdFwc= -github.com/libp2p/go-libp2p-kad-dht v0.24.2/go.mod h1:BShPzRbK6+fN3hk8a0WGAYKpb8m4k+DtchkqouGTrSg= github.com/libp2p/go-libp2p-kbucket v0.5.0/go.mod h1:zGzGCpQd78b5BNTDGHNDLaTt9aDK/A02xeZp9QeFC4U= github.com/libp2p/go-libp2p-kbucket v0.6.1/go.mod h1:dvWO707Oq/vhMVuUhyfLkw0QsOrJFETepbNfpVHSELI= -github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0= -github.com/libp2p/go-libp2p-kbucket v0.6.3/go.mod h1:RCseT7AH6eJWxxk2ol03xtP9pEHetYSPXOaJnOiD8i0= github.com/libp2p/go-libp2p-loggables v0.1.0 h1:h3w8QFfCt2UJl/0/NW4K829HX/0S4KD31PQ7m8UXXO8= github.com/libp2p/go-libp2p-metrics v0.0.1 h1:yumdPC/P2VzINdmcKZd0pciSUCpou+s0lwYCjBbzQZU= github.com/libp2p/go-libp2p-mplex v0.6.0 h1:5ubK4/vLE2JkogKlJ2JLeXcSfA6qY6mE2HMJV9ve/Sk= @@ -1087,37 +1043,22 @@ github.com/libp2p/go-libp2p-peer v0.2.0 h1:EQ8kMjaCUwt/Y5uLgjT8iY2qg0mGUT0N1zUje github.com/libp2p/go-libp2p-peerstore v0.7.0 h1:2iIUwok3vtmnWJTZeTeLgnBO6GbkXcwSRwgZHEKrQZs= github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= github.com/libp2p/go-libp2p-protocol v0.1.0 h1:HdqhEyhg0ToCaxgMhnOmUO8snQtt/kQlcjVk3UoJU3c= -github.com/libp2p/go-libp2p-pubsub v0.9.3 h1:ihcz9oIBMaCK9kcx+yHWm3mLAFBMAUsM4ux42aikDxo= -github.com/libp2p/go-libp2p-pubsub v0.9.3/go.mod h1:RYA7aM9jIic5VV47WXu4GkcRxRhrdElWf8xtyli+Dzc= github.com/libp2p/go-libp2p-pubsub-router v0.6.0 h1:D30iKdlqDt5ZmLEYhHELCMRj8b4sFAqrUcshIUvVP/s= github.com/libp2p/go-libp2p-pubsub-router v0.6.0/go.mod h1:FY/q0/RBTKsLA7l4vqC2cbRbOvyDotg8PJQ7j8FDudE= github.com/libp2p/go-libp2p-quic-transport v0.17.0 h1:yFh4Gf5MlToAYLuw/dRvuzYd1EnE2pX3Lq1N6KDiWRQ= -github.com/libp2p/go-libp2p-raft v0.4.0 h1:2atEs7/FWH35bRiLh8gTejGh5NA9u4eG7BXjpf/B+Z4= -github.com/libp2p/go-libp2p-raft v0.4.0/go.mod h1:qJCYtFBTbip2wngLxFeAb9o52XmAPi2vSIQ4hV7IpSA= -github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= github.com/libp2p/go-libp2p-resource-manager v0.3.0 h1:2+cYxUNi33tcydsVLt6K5Fv2E3OTiVeafltecAj15E0= github.com/libp2p/go-libp2p-routing v0.1.0 h1:hFnj3WR3E2tOcKaGpyzfP4gvFZ3t8JkQmbapN0Ct+oU= github.com/libp2p/go-libp2p-routing-helpers v0.7.0/go.mod h1:R289GUxUMzRXIbWGSuUUTPrlVJZ3Y/pPz495+qgXJX8= -github.com/libp2p/go-libp2p-routing-helpers v0.7.1 h1:kc0kWCZecbBPAiFEHhxfGJZPqjg1g9zV+X+ovR4Tmnc= 
-github.com/libp2p/go-libp2p-routing-helpers v0.7.1/go.mod h1:cHStPSRC/wgbfpb5jYdMP7zaSmc2wWcb1mkzNr6AR8o= github.com/libp2p/go-libp2p-secio v0.2.2 h1:rLLPvShPQAcY6eNurKNZq3eZjPWfU9kXF2eI9jIYdrg= github.com/libp2p/go-libp2p-swarm v0.11.0 h1:ITgsTEY2tA4OxFJGcWeugiMh2x5+VOEnI2JStT1EWxI= -github.com/libp2p/go-libp2p-testing v0.11.0/go.mod h1:qG4sF27dfKFoK9KlVzK2y52LQKhp0VEmLjV5aDqr1Hg= -github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= github.com/libp2p/go-libp2p-tls v0.5.0 h1:aRNTeOI8Ljm1r4L2uMGxkMsVnyZoPwaqQqMw23qAsQs= github.com/libp2p/go-libp2p-transport v0.0.5 h1:pV6+UlRxyDpASSGD+60vMvdifSCby6JkJDfi+yUMHac= github.com/libp2p/go-libp2p-transport-upgrader v0.7.1 h1:MSMe+tUfxpC9GArTz7a4G5zQKQgGh00Vio87d3j3xIg= github.com/libp2p/go-libp2p-xor v0.1.0 h1:hhQwT4uGrBcuAkUGXADuPltalOdpf9aag9kaYNT2tLA= github.com/libp2p/go-libp2p-xor v0.1.0/go.mod h1:LSTM5yRnjGZbWNTA/hRwq2gGFrvRIbQJscoIL/u6InY= github.com/libp2p/go-libp2p-yamux v0.9.1 h1:oplewiRix8s45SOrI30rCPZG5mM087YZp+VYhXAh4+c= -github.com/libp2p/go-maddr-filter v0.1.0 h1:4ACqZKw8AqiuJfwFGq1CYDFugfXTOos+qQ3DETkhtCE= github.com/libp2p/go-mplex v0.7.0 h1:BDhFZdlk5tbr0oyFq/xv/NPGfjbnrsDam1EvutpBDbY= -github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= -github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk= -github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ= github.com/libp2p/go-reuseport v0.3.0/go.mod h1:laea40AimhtfEqysZ71UpYj4S+R9VpH8PgqLo7L+SwI= -github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= -github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= github.com/libp2p/go-reuseport-transport v0.1.0 h1:C3PHeHjmnz8m6f0uydObj02tMEoi7CyD1zuN7xQT8gc= github.com/libp2p/go-sockaddr v0.1.1 h1:yD80l2ZOdGksnOyHrhxDdTDFrf7Oy+v3FMVArIRgZxQ= github.com/libp2p/go-socket-activation v0.1.0 h1:OImQPhtbGlCNaF/KSTl6pBBy+chA5eBt5i9uMJNtEdY= @@ -1130,50 +1071,29 @@ github.com/libp2p/go-yamux v1.4.1 h1:P1Fe9vF4th5JOxxgQvfbOHkrGqIZniTLf+ddhZp8YTI github.com/libp2p/go-yamux/v2 v2.3.0 h1:luRV68GS1vqqr6EFUjtu1kr51d+IbW0gSowu8emYWAI= github.com/libp2p/go-yamux/v3 v3.1.2 h1:lNEy28MBk1HavUAlzKgShp+F6mn/ea1nDYWftZhFW9Q= github.com/libp2p/go-yamux/v4 v4.0.0/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= -github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ= -github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= github.com/libp2p/zeroconf/v2 v2.2.0 h1:Cup06Jv6u81HLhIj1KasuNM/RHHrJ8T7wOTS4+Tv53Q= -github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743 h1:143Bb8f8DuGWck/xpNUOckBVYfFbBTnLevfRZ1aVVqo= github.com/lightstep/lightstep-tracer-go v0.18.1 h1:vi1F1IQ8N7hNWytK9DpJsUfQhGuNSc19z330K6vl4zk= -github.com/lucas-clemente/quic-go v0.28.1/go.mod h1:oGz5DKK41cJt5+773+BSO9BXDsREY4HLf7+0odGAPO0= github.com/lucas-clemente/quic-go v0.29.1 h1:Z+WMJ++qMLhvpFkRZA+jl3BTxUjm415YBmWanXB8zP0= github.com/lucas-clemente/quic-go v0.29.1/go.mod h1:CTcNfLYJS2UuRNB+zcNlgvkjBhxX6Hm3WUxxAQx2mgE= -github.com/lucasb-eyer/go-colorful v1.0.3 h1:QIbQXiugsb+q10B+MI+7DI1oQLdmnep86tWFlaaUAac= github.com/lufia/iostat v1.1.0 h1:Z1wa4Hhxwi8uSKfgRsFc5RLtt3SuFPIOgkiPGkUtHDY= github.com/lunixbochs/vtclean v1.0.0 h1:xu2sLAri4lGiovBDQKxl5mrXyESr3gUr5m5SM5+LVb8= 
github.com/lyft/protoc-gen-star/v2 v2.0.1 h1:keaAo8hRuAT0O3DfJ/wM3rufbAjGeJ1lAtWZHDjKGB0= github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= github.com/lyft/protoc-gen-validate v0.0.13 h1:KNt/RhmQTOLr7Aj8PsJ7mTronaFyx80mRTT9qF261dA= -github.com/magefile/mage v1.9.0 h1:t3AU2wNwehMCW97vuqQLtw6puppWXHO+O2MHo5a50XE= github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/marten-seemann/qpack v0.2.1 h1:jvTsT/HpCn2UZJdP+UUB53FfUUgeOyG5K1ns0OJOGVs= github.com/marten-seemann/qtls v0.10.0 h1:ECsuYUKalRL240rRD4Ri33ISb7kAQ3qGDlrrl55b2pc= github.com/marten-seemann/qtls-go1-15 v0.1.5 h1:Ci4EIUN6Rlb+D6GmLdej/bCQ4nPYNtVXQB+xjiXE1nk= github.com/marten-seemann/qtls-go1-16 v0.1.5 h1:o9JrYPPco/Nukd/HpOHMHZoBDXQqoNtUCmny98/1uqQ= github.com/marten-seemann/qtls-go1-17 v0.1.2 h1:JADBlm0LYiVbuSySCHeY863dNkcpMmDR7s0bLKJeYlQ= -github.com/marten-seemann/qtls-go1-17 v0.1.2/go.mod h1:C2ekUKcDdz9SDWxec1N/MvcXBpaX9l3Nx67XaR84L5s= github.com/marten-seemann/qtls-go1-18 v0.1.2 h1:JH6jmzbduz0ITVQ7ShevK10Av5+jBEKAHMntXmIV7kM= -github.com/marten-seemann/qtls-go1-18 v0.1.2/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4= -github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= github.com/marten-seemann/qtls-go1-19 v0.1.0 h1:rLFKD/9mp/uq1SYGYuVZhm83wkmU95pK5df3GufyYYU= -github.com/marten-seemann/qtls-go1-19 v0.1.0/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= -github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/webtransport-go v0.1.1 h1:TnyKp3pEXcDooTaNn4s9dYpMJ7kMnTp7k5h+SgYP/mc= github.com/marten-seemann/webtransport-go v0.1.1/go.mod h1:kBEh5+RSvOA4troP1vyOVBWK4MIMzDICXVrvCPrYcrM= github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= -github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= -github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104 h1:d8RFOZ2IiFtFWBcKEHAFYJcPTf0wY5q0exFNJZVWa1U= github.com/mattn/go-xmlrpc v0.0.3 h1:Y6WEMLEsqs3RviBrAa1/7qmbGB7DVD3brZIbqMbQdGY= github.com/mdlayher/genetlink v1.0.0 h1:OoHN1OdyEIkScEmRgxLEe2M9U8ClMytqA5niynLtfj0= @@ -1183,12 +1103,7 @@ github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1f github.com/microcosm-cc/bluemonday v1.0.1 h1:SIYunPjnlXcW+gVfvm0IlSeR5U3WZUOLfVmqg85Go44= github.com/microsoft/go-mssqldb v1.3.0 h1:JcPVl+acL8Z/cQcJc9zP0OkjQ+l20bco/cCDpMbmGJk= 
github.com/microsoft/go-mssqldb v1.3.0/go.mod h1:lmWsjHD8XX/Txr0f8ZqgbEZSC+BZjmEQy/Ms+rLrvho= -github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= -github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= -github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= -github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= -github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI= @@ -1196,8 +1111,6 @@ github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8Ie github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= github.com/minio/sha256-simd v1.0.1-0.20230130105256-d9c3aea9e949/go.mod h1:svsp3c9I8SlWYKpIFAZMgdvmFn8DIN5C9ktYpzZEj80= github.com/mitchellh/cli v1.1.0 h1:tEElEatulEHDeedTxwckzyYMA5c86fbmNIUL1hBIiTg= -github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= -github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= github.com/mitchellh/gox v0.4.0 h1:lfGJxY7ToLJQjHHwi0EX6uYBdK78egf954SQl13PQJc= github.com/mitchellh/iochan v1.0.0 h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY= @@ -1214,22 +1127,16 @@ github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5 h1:8Q0qkMVC/MmWkpIdlvZgcv2o2jrlF6zqVOh7W5YHdMA= -github.com/montanaflynn/stats v0.7.0 h1:r3y12KyNxj/Sb/iOE46ws+3mS1+MZca1wlHQFPsY/JU= -github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mrunalp/fileutils v0.5.0 h1:NKzVxiH7eSk+OQ4M+ZYW1K6h27RUV3MI6NUTsHhU6Z4= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY= -github.com/multiformats/go-multiaddr v0.6.0/go.mod h1:F4IpaKZuPP360tOMn2Tpyu0At8w23aRyVqeK0DbFeGM= -github.com/multiformats/go-multiaddr v0.7.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs= github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs= github.com/multiformats/go-multiaddr v0.9.0/go.mod h1:mI67Lb1EeTOYb8GQfL/7wpIZwc46ElrvzhYnoJOmTT0= github.com/multiformats/go-multiaddr-net v0.2.0 h1:MSXRGN0mFymt6B1yo/6BPnIRpLPEnKgQNvVfCX5VDJk= -github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8= github.com/multiformats/go-multicodec v0.7.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= 
github.com/multiformats/go-multicodec v0.8.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= github.com/multiformats/go-multicodec v0.8.1/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= -github.com/multiformats/go-multistream v0.4.1/go.mod h1:Mz5eykRVAjJWckE2U78c6xqdtyNUEhKSM0Lwar2p77Q= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= @@ -1246,9 +1153,6 @@ github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c h1:bY6ktFuJkt+Z github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/ngdinhtoan/glide-cleanup v0.2.0 h1:kN4sV+0tp2F1BvwU+5SfNRMDndRmvIfnI3kZ7B8Yv4Y= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/nikkolasg/hexjson v0.1.0 h1:Cgi1MSZVQFoJKYeRpBNEcdF3LB+Zo4fYKsDz7h8uJYQ= -github.com/nikkolasg/hexjson v0.1.0/go.mod h1:fbGbWFZ0FmJMFbpCMtJpwb0tudVxSSZ+Es2TsCg57cA= -github.com/nkovacs/streamquote v1.0.0 h1:PmVIV08Zlx2lZK5fFZlMZ04eHcDTIFJCv/5/0twVUow= github.com/oklog/oklog v0.3.2 h1:wVfs8F+in6nTBMkA7CbRw+zZMIB7nNM825cM1wuzoTk= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= @@ -1258,24 +1162,15 @@ github.com/onsi/ginkgo/v2 v2.2.0/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7 github.com/onsi/ginkgo/v2 v2.5.1/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc= github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k= github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0= -github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= -github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7 h1:lDH9UUVJtmYCjyT0CI4q8xvlXPxeZ0gYCVvWbmPlp88= -github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 h1:CznVS40zms0Dj5he4ERo+fRPtO0qxUk8lA8Xu3ddet0= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/image-spec v1.1.0-rc4 h1:oOxKUJWnFC4YGHCCMNql1x4YaDfYBTS5Y4x/Cgeo1E0= github.com/opencontainers/image-spec v1.1.0-rc4/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= github.com/opencontainers/runc v1.1.7 h1:y2EZDS8sNng4Ksf0GUYNhKbTShZJPJg1FiXJNH/uoCk= github.com/opencontainers/runc v1.1.7/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50= -github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= -github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.8.2 h1:c4ca10UMgRcvZ6h0K4HtS15UaVSBEaE+iln2LVpAuGc= -github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 
h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU= -github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= github.com/opentracing/basictracer-go v1.0.0 h1:YyUAhaEfjoWXclZVJ9sGoNct7j4TVk7lZWlQw5UXuoo= github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5 h1:ZCnq+JUrvXcDVhX/xRolRBZifmabN1HcS1wrPSvxhrU= github.com/openzipkin/zipkin-go v0.4.1 h1:kNd/ST2yLLWhaWrkgchya40TJabe8Hioj9udfPcEO5A= @@ -1287,11 +1182,9 @@ github.com/orlangure/gnomock v0.24.0/go.mod h1:h/LLsICS1PuAufvBcYv7YMBEVF0BldSKt github.com/ory/dockertest/v3 v3.10.0 h1:4K3z2VMe8Woe++invjaTB7VRyQXQy5UY+loujO4aNE4= github.com/ory/dockertest/v3 v3.10.0/go.mod h1:nr57ZbRWMqfsdGdFNLHz5jjNdDb7VVFnzAeW1n5N1Lg= github.com/pact-foundation/pact-go v1.0.4 h1:OYkFijGHoZAYbOIb1LWXrwKQbMMRUv1oQ89blD2Mh2Q= -github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/paulbellamy/ratecounter v0.2.0 h1:2L/RhJq+HA8gBQImDXtLPrDXK5qAj6ozWVK/zFXVJGs= github.com/paulmach/orb v0.9.2 h1:p/YWV2uJwamAynnDOJGNbPBVtDHj3vG51k9tR1rFwJE= github.com/paulmach/orb v0.9.2/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= -github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= @@ -1316,20 +1209,11 @@ github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZ github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5 h1:tFwafIEMf0B7NlcxV/zJ6leBIa81D3hgGSgsE5hCkOQ= github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289 h1:dTUS1vaLWq+Y6XKOTnrFpoVsQKLCbCp1OLj24TDi7oM= github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= -github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= -github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA= -github.com/puzpuzpuz/xsync/v2 v2.4.0 h1:5sXAMHrtx1bg9nbRZTOn8T4MkWe5V+o8yKRH02Eznag= -github.com/puzpuzpuz/xsync/v2 v2.4.0/go.mod h1:gD2H2krq/w52MfPLE+Uy64TzJDVY7lP2znR9qmR35kU= -github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= github.com/quic-go/qtls-go1-18 v0.2.0 h1:5ViXqBZ90wpUcZS0ge79rf029yx0dYB0McyPJwqqj7U= github.com/quic-go/qtls-go1-18 v0.2.0/go.mod h1:moGulGHK7o6O8lSPSZNoOwcLvJKJ85vVNc7oJFD65bc= github.com/quic-go/qtls-go1-19 v0.2.0/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI= @@ -1344,27 +1228,18 @@ github.com/quic-go/qtls-go1-20 v0.3.2/go.mod 
h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58 github.com/quic-go/quic-go v0.32.0/go.mod h1:/fCsKANhQIeD5l76c2JFU+07gVE3KaA0FP+0zMWwfwo= github.com/quic-go/quic-go v0.33.0/go.mod h1:YMuhaAV9/jIu0XclDXwZPAsP/2Kgr5yMYhe9oxhhOFA= github.com/quic-go/quic-go v0.37.6/go.mod h1:YsbH1r4mSHPJcLF4k4zruUkLBqctEMBDR6VPvcYjIsU= -github.com/quic-go/quic-go v0.38.1 h1:M36YWA5dEhEeT+slOu/SwMEucbYd0YFidxG3KlGPZaE= -github.com/quic-go/quic-go v0.38.1/go.mod h1:ijnZM7JsFIkp4cRyjxJNIzdSfCLmUMg9wdyhGmg+SN4= github.com/quic-go/webtransport-go v0.5.2/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= -github.com/quic-go/webtransport-go v0.5.3/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= github.com/rabbitmq/amqp091-go v1.5.0 h1:VouyHPBu1CrKyJVfteGknGOGCzmOz0zcv/tONLkb7rg= github.com/rabbitmq/amqp091-go v1.5.0/go.mod h1:JsV0ofX5f1nwOGafb8L5rBItt9GyhfQfcJj+oyz0dGg= -github.com/raulk/clock v1.1.0 h1:dpb29+UKMbLqiU/jqIJptgLR1nn23HLgMY0sTCDza5Y= -github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= -github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52 h1:RnWNS9Hlm8BIkjr6wx8li5abe0fr73jljLycdfemTp0= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= -github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= github.com/rs/zerolog v1.21.0 h1:Q3vdXlfLNT+OftyBHsU0Y445MD+8m8axjKgf2si0QcM= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= @@ -1388,7 +1263,6 @@ github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtrmhM= github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY= github.com/segmentio/kafka-go v0.2.0 h1:HtCSf6B4gN/87yc5qTl7WsxPKQIIGXLPPM1bMCPOsoY= -github.com/sercand/kuberesolver v2.4.0+incompatible h1:WE2OlRf6wjLxHwNkkFLQGaZcVLEXjMjBPjjEU5vksH8= github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4 h1:Fth6mevc5rX7glNLpbAMJnqKlfIkcTjZCSHEeqvKbcI= @@ -1419,9 +1293,6 @@ github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJV github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133 h1:JtcyT0rk/9PKOdnKQzuDR+FSjh7SGtJwpgVpfZBRKlQ= github.com/siebenmann/go-kstat v0.0.0-20160321171754-d34789b79745 
h1:IuH7WumZNax0D+rEqmy2TyhKCzrtMGqbZO0b8rO00JA= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartystreets/assertions v1.13.0 h1:Dx1kYM01xsSqKPno3aqLnrwac2LetPvN23diwyr69Qs= github.com/smartystreets/assertions v1.13.0/go.mod h1:wDmR7qL282YbGsPy6H/yAsesrxfxaaSlJazyFLYVFx8= github.com/smola/gocompat v0.2.0 h1:6b1oIMlUXIpz//VKEDzPVBK8KG7beVwmHIUEBIs/Pns= github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= @@ -1447,7 +1318,6 @@ github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobt github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= github.com/streadway/amqp v1.0.0 h1:kuuDrUJFZL1QYL9hUNuCxNObNzB0bV/ZG5jV3RWAQgo= github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e h1:mOtuXaRAbVZsxAHVdPR3IjfmN8T1h2iczJLynhLybf8= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807 h1:LUsDduamlucuNnWcaTbXQ6aLILFcLXADpOzeEH3U+OI= @@ -1457,16 +1327,12 @@ github.com/supranational/blst v0.3.8-0.20220526154634-513d2456b344 h1:m+8fKfQwCA github.com/supranational/blst v0.3.8-0.20220526154634-513d2456b344/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07 h1:UyzmZLoiDWMRywV4DUYb9Fbt8uiOSooupjTq10vpvnU= -github.com/test-go/testify v1.1.4 h1:Tf9lntrKUMHiXQ07qBScBTSA0dhYQlu83hswqelv1iE= github.com/test-go/testify v1.1.4/go.mod h1:rH7cfJo/47vWGdi4GPj16x3/t1xGOj2YxzmNQzk2ghU= github.com/texttheater/golang-levenshtein v0.0.0-20180516184445-d188e65d659e h1:T5PdfK/M1xyrHwynxMIVMWLS7f/qHwfslZphxtGnw7s= github.com/thoas/go-funk v0.9.1 h1:O549iLZqPpTUQ10ykd26sZhzD+rmR5pWhuElrhbC20M= github.com/thoas/go-funk v0.9.1/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q= -github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM= github.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= -github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= -github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= @@ -1476,28 +1342,16 @@ github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pv github.com/tj/go-spin v1.1.0 h1:lhdWZsvImxvZ3q1C5OIB7d72DuOwP4O2NdBg9PyzNds= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ= -github.com/twmb/murmur3 v1.1.6 h1:mqrRot1BRxm+Yct+vavLMou2/iJt0tNVTTC0QoIjaZg= -github.com/twmb/murmur3 v1.1.6/go.mod 
h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= -github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb h1:Ywfo8sUltxogBpFuMOFRrrSifO788kAFxmvVw31PtQQ= github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb/go.mod h1:ikPs9bRWicNw3S7XpJ8sK/smGwU9WcSVU3dy9qahYBM= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go v1.2.6 h1:tGiWC9HENWE2tqYycIqFTNorMmFRVhNwCpDOpWqnk8E= -github.com/ugorji/go v1.2.6/go.mod h1:anCg0y61KIhDlPZmnH+so+RQbysYVyDko0IMgJv0Nn0= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -github.com/ugorji/go/codec v1.2.6 h1:7kbGefxLoDBuYXOms4yD7223OpNMMPNPZxXk5TvFcyQ= -github.com/ugorji/go/codec v1.2.6/go.mod h1:V6TCNZ4PHqoHGFZuSG1W8nrCzzdgA2DozYxWFFpvxTw= github.com/urfave/cli v1.22.10 h1:p8Fspmz3iTctJstry1PYS3HVdllxnEzTEsgIgtxTrCk= -github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= -github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= -github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= -github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4= -github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/vertica/vertica-sql-go v1.3.2 h1:QclPnkuozQyNl6lbrTdeuFSx2/lcSRZc1XL8zWNSjdA= github.com/vertica/vertica-sql-go v1.3.2/go.mod h1:jnn2GFuv+O2Jcjktb7zyc4Utlbu9YVqpHH/lx63+1M4= github.com/viant/assertly v0.4.8 h1:5x1GzBaRteIwTr5RAGFVG14uNeRFxVNbXPWrK2qAgpc= @@ -1506,26 +1360,16 @@ github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJ github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k= github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30 h1:kZiWylALnUy4kzoKJemjH8eqwCl3RjW1r1ITCjjW7G8= github.com/warpfork/go-fsx v0.3.0 h1:RGueN83R4eOc/2oZkQ58RRxQS9JIevWgvoM55oaN9tE= -github.com/warpfork/go-fsx v0.3.0/go.mod h1:oTACCMj+Zle+vgVa5SAhGAh7WksYpLgGUCKEAVc+xPg= -github.com/warpfork/go-testmark v0.12.1 h1:rMgCpJfwy1sJ50x0M0NgyphxYYPMOODIJHhsXyEHU0s= -github.com/warpfork/go-testmark v0.12.1/go.mod h1:kHwy7wfvGSPh1rQJYKayD4AbtNaeyZdcGi9tNJTaa5Y= -github.com/weaveworks/common v0.0.0-20220810113439-c65105d60b18 h1:JN4YR/TNWiZEAHHImrVA2u4DPI+aqPOar23ICUnYZTQ= github.com/weaveworks/common v0.0.0-20220810113439-c65105d60b18/go.mod h1:YfOOLoW1Q/jIIu0WLeSwgStmrKjuJEZSKTAUc+0KFvE= -github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= -github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc h1:BCPnHtcboadS0DvysUuJXZ4lWVv5Bh5i7+tbIyi+ck4= -github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba h1:X4n8JG2e2biEZZXdBKt9HX7DN3bYGFUqljqqy0DqgnY= github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= 
github.com/whyrusleeping/cbor-gen v0.0.0-20230418232409-daab9ece03a0/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20230818171029-f91ae536ca25/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/go-ctrlnet v0.0.0-20180313164037-f564fbbdaa95 h1:c23eYhe7i8MG6dUSPzyIDDy5+cWOoZMovPamBKqrjYQ= -github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= github.com/whyrusleeping/go-logging v0.0.1 h1:fwpzlmT0kRC/Fmd0MdmGgJG/CXIZ6gFq46FQZjprUcc= github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f h1:M/lL30eFZTKnomXY6huvM6G0+gVquFNf6mxghaWlFUg= github.com/whyrusleeping/go-sysinfo v0.0.0-20190219211824-4a357d4b90b1 h1:ctS9Anw/KozviCCtK6VWMz5kPL9nbQzbQY4yfqlIV4M= -github.com/whyrusleeping/ledger-filecoin-go v0.9.1-0.20201010031517-c3dcc1bddce4 h1:NwiwjQDB3CzQ5XH0rdMh1oQqzJH7O2PSLWxif/w3zsY= github.com/whyrusleeping/mafmt v1.2.8 h1:TCghSl5kkwEE0j+sU/gudyhVMRlpBin8fMBBHg59EbA= github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9 h1:Y1/FEOpaCpD21WxrmfeIYCFPuVPRCY2XZTWzTNHGw30= -github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds= github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow= github.com/willf/bitset v1.1.3 h1:ekJIKh6+YbUIVt9DfNbkR5d6aFcFTLDRyJNAACURBg8= github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg= @@ -1541,7 +1385,6 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5 github.com/xlab/c-for-go v0.0.0-20201112171043-ea6dce5809cb h1:/7/dQyiKnxAOj9L69FhST7uMe17U015XPzX7cy+5ykM= github.com/xlab/pkgconfig v0.0.0-20170226114623-cea12a0fd245 h1:Sw125DKxZhPUI4JLlWugkzsrlB50jR9v2khiD9FxuSo= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6 h1:YdYsPAZ2pC6Tow/nPZOPQ96O3hm/ToAkGsPLzedXERk= -github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 h1:oWgZJmC1DorFZDpfMfWg7xk29yEOZiXmo/wZl+utTI8= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow= github.com/ybbus/jsonrpc/v2 v2.1.7 h1:QjoXuZhkXZ3oLBkrONBe2avzFkYeYLorpeA+d8175XQ= github.com/ybbus/jsonrpc/v2 v2.1.7/go.mod h1:rIuG1+ORoiqocf9xs/v+ecaAVeo3zcZHQgInyKFMeg0= @@ -1550,15 +1393,7 @@ github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= github.com/zenazn/goji v0.9.0 h1:RSQQAbXGArQ0dIDEq+PI6WqN6if+5KHu6x2Cx/GXLTQ= github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= -github.com/zondax/hid v0.9.1 h1:gQe66rtmyZ8VeGFcOpbuH3r7erYtNEAezCAYu8LdkJo= -github.com/zondax/hid v0.9.1/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= -github.com/zondax/ledger-go v0.12.1 h1:hYRcyznPRJp+5mzF2sazTLP2nGvGjYDD2VzhHhFomLU= -github.com/zyedidia/generic v1.2.1 h1:Zv5KS/N2m0XZZiuLS82qheRG4X1o5gsWreGb0hR7XDc= -github.com/zyedidia/generic v1.2.1/go.mod h1:ly2RBz4mnz1yeuVbQA/VFwGjK3mnHGRj1JuoG336Bis= -go.dedis.ch/fixbuf v1.0.3 h1:hGcV9Cd/znUxlusJ64eAlExS+5cJDIyTyEG+otu5wQs= go.dedis.ch/kyber/v3 v3.0.9 h1:i0ZbOQocHUjfFasBiUql5zVeC7u/vahFd96DFA8UOWk= -go.dedis.ch/protobuf v1.0.11 h1:FTYVIEzY/bfl37lu3pR4lIj+F9Vp1jE8oh91VmxKgLo= -go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= 
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738 h1:VcrIfasaLFkyjk6KNlXQSzO+B0fZcnECiDrKJsfxka0= go.etcd.io/etcd/api/v3 v3.5.0 h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw= @@ -1578,7 +1413,6 @@ go.opentelemetry.io/contrib/propagators/jaeger v1.17.0 h1:Zbpbmwav32Ea5jSotpmkWE go.opentelemetry.io/contrib/propagators/jaeger v1.17.0/go.mod h1:tcTUAlmO8nuInPDSBVfG+CP6Mzjy5+gNV4mPxMbL0IA= go.opentelemetry.io/contrib/propagators/ot v1.17.0 h1:ufo2Vsz8l76eI47jFjuVyjyB3Ae2DmfiCV/o6Vc8ii0= go.opentelemetry.io/contrib/propagators/ot v1.17.0/go.mod h1:SbKPj5XGp8K/sGm05XblaIABgMgw2jDczP8gGeuaVLk= -go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= go.opentelemetry.io/otel v1.13.0/go.mod h1:FH3RtdZCzRkJYFTCsAKDy9l/XYjMdNv6QrkFFB8DvVg= go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= go.opentelemetry.io/otel/bridge/opencensus v0.39.0 h1:YHivttTaDhbZIHuPlg1sWsy2P5gj57vzqPfkHItgbwQ= @@ -1606,29 +1440,19 @@ go.opentelemetry.io/otel/sdk v1.14.0/go.mod h1:bwIC5TjrNG6QDCHNWvW4HLHtUQ4I+VQDs go.opentelemetry.io/otel/sdk/export/metric v0.25.0 h1:6UjAFmVB5Fza3K5qUJpYWGrk8QMPIqlSnya5FI46VBY= go.opentelemetry.io/otel/sdk/metric v0.39.0 h1:Kun8i1eYf48kHH83RucG93ffz0zGV1sh46FAScOTuDI= go.opentelemetry.io/otel/sdk/metric v0.39.0/go.mod h1:piDIRgjcK7u0HCL5pCA4e74qpK/jk3NiUoAHATVAmiI= -go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= go.opentelemetry.io/otel/trace v1.13.0/go.mod h1:muCvmmO9KKpvuXSf3KKAXXB2ygNYHQ+ZfI5X08d3tds= go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8= go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/dig v1.15.0/go.mod h1:pKHs0wMynzL6brANhB2hLMro+zalv1osARTviTcqHLM= -go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI= -go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU= go.uber.org/fx v1.18.2/go.mod h1:g0V1KMQ66zIRk8bLu3Ea5Jt2w/cHlOIp4wdRsgh0JaY= go.uber.org/fx v1.19.2/go.mod h1:43G1VcqSzbIv77y00p1DRAsyZS8WdzuYdhZXmEUkMyQ= -go.uber.org/fx v1.20.0 h1:ZMC/pnRvhsthOZh9MZjMq5U8Or3mA9zBSPaLnzs3ihQ= -go.uber.org/fx v1.20.0/go.mod h1:qCUj0btiR3/JnanEr1TYEePfSw6o/4qYJscgvzQ5Ub0= go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.22.0/go.mod h1:H4siCOZOrAolnUPJEkfaSjDqyP+BDS0DdDWzwcgt3+U= go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc= -go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d h1:E2M5QgjZ/Jg+ObCQAudsXxuTsLj7Nl5RV/lZcQZmKSo= golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.4.0/go.mod 
h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= @@ -1636,8 +1460,6 @@ golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= golang.org/x/exp v0.0.0-20221205204356-47842c84f3db/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/exp v0.0.0-20230129154200-a960b3787bd2/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= @@ -1655,7 +1477,6 @@ golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeap golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/image v0.6.0 h1:bR8b5okrPI3g/gyZakLZHeWxAR8Dn5CyxXv1hLH5g/4= golang.org/x/image v0.6.0/go.mod h1:MXLdDR43H7cDJq5GEGXEVeeNhPgi+YYEQ2pC1byI1x0= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f h1:kgfVkAEEQXXQ0qc6dH7n6y37NAYmTFmz0YRwrRjgxKw= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -1664,24 +1485,17 @@ golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20220923203811-8be639271d50/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod 
h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= @@ -1704,7 +1518,6 @@ golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1712,14 +1525,7 @@ golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1731,21 +1537,15 @@ golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= 
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= @@ -1754,18 +1554,14 @@ golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= -golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= -gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM= -gonum.org/v1/gonum v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= gonum.org/v1/plot v0.10.1 h1:dnifSs43YJuNMDzB7v8wV64O4ABBHReuAVAoBxqBqS4= @@ -1800,8 +1596,6 @@ google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= 
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= @@ -1812,14 +1606,9 @@ google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsA google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= -google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= -google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 h1:M1YKkFIboKNieVO5DLUEVzQfGwJD30Nv2jfUgzb5UcE= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= -gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs= @@ -1834,15 +1623,10 @@ gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d h1:mXa4inJUuWOoA4uER gopkg.in/src-d/go-log.v1 v1.0.1 h1:heWvX7J6qbGWbeFS/aRmiy1eYaT+QMV6wNvHDyMjQV4= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0 h1:POO/ycCATvegFmVuPpQzZFJ+pGZeX22Ufu6fibxDVjU= -gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919 h1:tmXTu+dfa+d9Evp8NpJdgOy6+rt8/x4yG7qPBrtNfLY= honnef.co/go/tools v0.1.3 h1:qTakTkI6ni6LFD5sBwwsdSO+AQqbSIxOauHTTQKZ/7o= -howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= -howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= modernc.org/cc/v3 v3.41.0/go.mod h1:Ni4zjJYJ04CDOhG7dn640WGfwBzfE0ecX8TyMB0Fv0Y= modernc.org/ccgo/v3 v3.16.14/go.mod h1:mPDSujUIaTNWQSG4eqKw+atqLOEbma6Ncsa94WbC9zo= diff --git a/indexprovider/mock/mock.go b/indexprovider/mock/mock.go deleted file mode 100644 index 0e6e4e4cf..000000000 --- a/indexprovider/mock/mock.go +++ /dev/null @@ -1,273 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/filecoin-project/boost-gfm/storagemarket (interfaces: StorageProvider) - -// Package mock is a generated GoMock package. 
-package mock - -import ( - context "context" - io "io" - reflect "reflect" - - shared "github.com/filecoin-project/boost-gfm/shared" - storagemarket "github.com/filecoin-project/boost-gfm/storagemarket" - abi "github.com/filecoin-project/go-state-types/abi" - big "github.com/filecoin-project/go-state-types/big" - gomock "github.com/golang/mock/gomock" - cid "github.com/ipfs/go-cid" -) - -// MockStorageProvider is a mock of StorageProvider interface. -type MockStorageProvider struct { - ctrl *gomock.Controller - recorder *MockStorageProviderMockRecorder -} - -// MockStorageProviderMockRecorder is the mock recorder for MockStorageProvider. -type MockStorageProviderMockRecorder struct { - mock *MockStorageProvider -} - -// NewMockStorageProvider creates a new mock instance. -func NewMockStorageProvider(ctrl *gomock.Controller) *MockStorageProvider { - mock := &MockStorageProvider{ctrl: ctrl} - mock.recorder = &MockStorageProviderMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockStorageProvider) EXPECT() *MockStorageProviderMockRecorder { - return m.recorder -} - -// AddStorageCollateral mocks base method. -func (m *MockStorageProvider) AddStorageCollateral(arg0 context.Context, arg1 big.Int) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddStorageCollateral", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// AddStorageCollateral indicates an expected call of AddStorageCollateral. -func (mr *MockStorageProviderMockRecorder) AddStorageCollateral(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddStorageCollateral", reflect.TypeOf((*MockStorageProvider)(nil).AddStorageCollateral), arg0, arg1) -} - -// AnnounceAllDealsToIndexer mocks base method. -func (m *MockStorageProvider) AnnounceAllDealsToIndexer(arg0 context.Context) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AnnounceAllDealsToIndexer", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// AnnounceAllDealsToIndexer indicates an expected call of AnnounceAllDealsToIndexer. -func (mr *MockStorageProviderMockRecorder) AnnounceAllDealsToIndexer(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AnnounceAllDealsToIndexer", reflect.TypeOf((*MockStorageProvider)(nil).AnnounceAllDealsToIndexer), arg0) -} - -// AnnounceDealToIndexer mocks base method. -func (m *MockStorageProvider) AnnounceDealToIndexer(arg0 context.Context, arg1 cid.Cid) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AnnounceDealToIndexer", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// AnnounceDealToIndexer indicates an expected call of AnnounceDealToIndexer. -func (mr *MockStorageProviderMockRecorder) AnnounceDealToIndexer(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AnnounceDealToIndexer", reflect.TypeOf((*MockStorageProvider)(nil).AnnounceDealToIndexer), arg0, arg1) -} - -// GetAsk mocks base method. -func (m *MockStorageProvider) GetAsk() *storagemarket.SignedStorageAsk { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAsk") - ret0, _ := ret[0].(*storagemarket.SignedStorageAsk) - return ret0 -} - -// GetAsk indicates an expected call of GetAsk. 
-func (mr *MockStorageProviderMockRecorder) GetAsk() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAsk", reflect.TypeOf((*MockStorageProvider)(nil).GetAsk)) -} - -// GetLocalDeal mocks base method. -func (m *MockStorageProvider) GetLocalDeal(arg0 cid.Cid) (storagemarket.MinerDeal, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetLocalDeal", arg0) - ret0, _ := ret[0].(storagemarket.MinerDeal) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetLocalDeal indicates an expected call of GetLocalDeal. -func (mr *MockStorageProviderMockRecorder) GetLocalDeal(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLocalDeal", reflect.TypeOf((*MockStorageProvider)(nil).GetLocalDeal), arg0) -} - -// GetStorageCollateral mocks base method. -func (m *MockStorageProvider) GetStorageCollateral(arg0 context.Context) (storagemarket.Balance, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetStorageCollateral", arg0) - ret0, _ := ret[0].(storagemarket.Balance) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetStorageCollateral indicates an expected call of GetStorageCollateral. -func (mr *MockStorageProviderMockRecorder) GetStorageCollateral(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStorageCollateral", reflect.TypeOf((*MockStorageProvider)(nil).GetStorageCollateral), arg0) -} - -// ImportDataForDeal mocks base method. -func (m *MockStorageProvider) ImportDataForDeal(arg0 context.Context, arg1 cid.Cid, arg2 io.Reader) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ImportDataForDeal", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// ImportDataForDeal indicates an expected call of ImportDataForDeal. -func (mr *MockStorageProviderMockRecorder) ImportDataForDeal(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportDataForDeal", reflect.TypeOf((*MockStorageProvider)(nil).ImportDataForDeal), arg0, arg1, arg2) -} - -// ListLocalDeals mocks base method. -func (m *MockStorageProvider) ListLocalDeals() ([]storagemarket.MinerDeal, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListLocalDeals") - ret0, _ := ret[0].([]storagemarket.MinerDeal) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListLocalDeals indicates an expected call of ListLocalDeals. -func (mr *MockStorageProviderMockRecorder) ListLocalDeals() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListLocalDeals", reflect.TypeOf((*MockStorageProvider)(nil).ListLocalDeals)) -} - -// ListLocalDealsPage mocks base method. -func (m *MockStorageProvider) ListLocalDealsPage(arg0 *cid.Cid, arg1, arg2 int) ([]storagemarket.MinerDeal, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListLocalDealsPage", arg0, arg1, arg2) - ret0, _ := ret[0].([]storagemarket.MinerDeal) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListLocalDealsPage indicates an expected call of ListLocalDealsPage. -func (mr *MockStorageProviderMockRecorder) ListLocalDealsPage(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListLocalDealsPage", reflect.TypeOf((*MockStorageProvider)(nil).ListLocalDealsPage), arg0, arg1, arg2) -} - -// LocalDealCount mocks base method. 
-func (m *MockStorageProvider) LocalDealCount() (int, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "LocalDealCount") - ret0, _ := ret[0].(int) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// LocalDealCount indicates an expected call of LocalDealCount. -func (mr *MockStorageProviderMockRecorder) LocalDealCount() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LocalDealCount", reflect.TypeOf((*MockStorageProvider)(nil).LocalDealCount)) -} - -// OnReady mocks base method. -func (m *MockStorageProvider) OnReady(arg0 shared.ReadyFunc) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "OnReady", arg0) -} - -// OnReady indicates an expected call of OnReady. -func (mr *MockStorageProviderMockRecorder) OnReady(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnReady", reflect.TypeOf((*MockStorageProvider)(nil).OnReady), arg0) -} - -// RetryDealPublishing mocks base method. -func (m *MockStorageProvider) RetryDealPublishing(arg0 cid.Cid) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RetryDealPublishing", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// RetryDealPublishing indicates an expected call of RetryDealPublishing. -func (mr *MockStorageProviderMockRecorder) RetryDealPublishing(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RetryDealPublishing", reflect.TypeOf((*MockStorageProvider)(nil).RetryDealPublishing), arg0) -} - -// SetAsk mocks base method. -func (m *MockStorageProvider) SetAsk(arg0, arg1 big.Int, arg2 abi.ChainEpoch, arg3 ...storagemarket.StorageAskOption) error { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1, arg2} - for _, a := range arg3 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "SetAsk", varargs...) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetAsk indicates an expected call of SetAsk. -func (mr *MockStorageProviderMockRecorder) SetAsk(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetAsk", reflect.TypeOf((*MockStorageProvider)(nil).SetAsk), varargs...) -} - -// Start mocks base method. -func (m *MockStorageProvider) Start(arg0 context.Context) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Start", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Start indicates an expected call of Start. -func (mr *MockStorageProviderMockRecorder) Start(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockStorageProvider)(nil).Start), arg0) -} - -// Stop mocks base method. -func (m *MockStorageProvider) Stop() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Stop") - ret0, _ := ret[0].(error) - return ret0 -} - -// Stop indicates an expected call of Stop. -func (mr *MockStorageProviderMockRecorder) Stop() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockStorageProvider)(nil).Stop)) -} - -// SubscribeToEvents mocks base method. 
-func (m *MockStorageProvider) SubscribeToEvents(arg0 storagemarket.ProviderSubscriber) shared.Unsubscribe { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SubscribeToEvents", arg0) - ret0, _ := ret[0].(shared.Unsubscribe) - return ret0 -} - -// SubscribeToEvents indicates an expected call of SubscribeToEvents. -func (mr *MockStorageProviderMockRecorder) SubscribeToEvents(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribeToEvents", reflect.TypeOf((*MockStorageProvider)(nil).SubscribeToEvents), arg0) -} diff --git a/node/modules/directdeals.go b/node/modules/directdeals.go index 0bca1759a..146e01968 100644 --- a/node/modules/directdeals.go +++ b/node/modules/directdeals.go @@ -3,7 +3,6 @@ package modules import ( "database/sql" - gfm_storagemarket "github.com/filecoin-project/boost-gfm/storagemarket" "github.com/filecoin-project/boost/db" "github.com/filecoin-project/boost/fundmanager" "github.com/filecoin-project/boost/indexprovider" @@ -24,13 +23,12 @@ import ( "go.uber.org/fx" ) -func NewDirectDealsProvider(provAddr address.Address, cfg *config.Boost) func(lc fx.Lifecycle, h host.Host, fullnodeApi v1api.FullNode, sqldb *sql.DB, directDealsDB *db.DirectDealsDB, fundMgr *fundmanager.FundManager, storageMgr *storagemanager.StorageManager, dp *storageadapter.DealPublisher, secb *sectorblocks.SectorBlocks, commpc types.CommpCalculator, commpt storagemarket.CommpThrottle, sps sealingpipeline.API, df dtypes.StorageDealFilter, logsSqlDB *LogSqlDB, logsDB *db.LogsDB, piecedirectory *piecedirectory.PieceDirectory, ip *indexprovider.Wrapper, lp gfm_storagemarket.StorageProvider, cdm *storagemarket.ChainDealManager) (*storagemarket.DirectDealsProvider, error) { +func NewDirectDealsProvider(provAddr address.Address, cfg *config.Boost) func(lc fx.Lifecycle, h host.Host, fullnodeApi v1api.FullNode, sqldb *sql.DB, directDealsDB *db.DirectDealsDB, fundMgr *fundmanager.FundManager, storageMgr *storagemanager.StorageManager, dp *storageadapter.DealPublisher, secb *sectorblocks.SectorBlocks, commpc types.CommpCalculator, commpt storagemarket.CommpThrottle, sps sealingpipeline.API, df dtypes.StorageDealFilter, logsSqlDB *LogSqlDB, logsDB *db.LogsDB, piecedirectory *piecedirectory.PieceDirectory, ip *indexprovider.Wrapper, cdm *storagemarket.ChainDealManager) (*storagemarket.DirectDealsProvider, error) { return func(lc fx.Lifecycle, h host.Host, fullnodeApi v1api.FullNode, sqldb *sql.DB, directDealsDB *db.DirectDealsDB, fundMgr *fundmanager.FundManager, storageMgr *storagemanager.StorageManager, dp *storageadapter.DealPublisher, secb *sectorblocks.SectorBlocks, commpc types.CommpCalculator, commpt storagemarket.CommpThrottle, sps sealingpipeline.API, df dtypes.StorageDealFilter, logsSqlDB *LogSqlDB, logsDB *db.LogsDB, - piecedirectory *piecedirectory.PieceDirectory, ip *indexprovider.Wrapper, - lp gfm_storagemarket.StorageProvider, cdm *storagemarket.ChainDealManager) (*storagemarket.DirectDealsProvider, error) { + piecedirectory *piecedirectory.PieceDirectory, ip *indexprovider.Wrapper, cdm *storagemarket.ChainDealManager) (*storagemarket.DirectDealsProvider, error) { dl := logs.NewDealLogger(logsDB) From 7765a3e235def6a7db76a8d6572c013dbf7803c4 Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Tue, 19 Dec 2023 18:53:05 +0400 Subject: [PATCH 25/34] handle storage ask --- storagemarket/lp2pimpl/net.go | 102 ++++++++++++++++++++++++++++++++++ 1 file changed, 102 insertions(+) diff --git a/storagemarket/lp2pimpl/net.go 
b/storagemarket/lp2pimpl/net.go
index 7c8708365..3081c2ff4 100644
--- a/storagemarket/lp2pimpl/net.go
+++ b/storagemarket/lp2pimpl/net.go
@@ -208,6 +208,10 @@ func (p *DealProvider) Start(ctx context.Context) {
 	p.host.SetStreamHandler(legacytypes.DealProtocolID101, p.handleLegacyDealStream)
 	p.host.SetStreamHandler(legacytypes.DealProtocolID110, p.handleLegacyDealStream)
 	p.host.SetStreamHandler(legacytypes.DealProtocolID111, p.handleLegacyDealStream)
+
+	// Handle Query Ask
+	p.host.SetStreamHandler(legacytypes.AskProtocolID, p.handleNewAskStream)
+	p.host.SetStreamHandler(legacytypes.OldAskProtocolID, p.handleOldAskStream)
 }
 
 func (p *DealProvider) Stop() {
@@ -530,3 +534,101 @@ func (p *DealProvider) signLegacyResponse(resp typegen.CBORMarshaler) (*crypto.S
 	return localSignature, err
 }
+
+func (p *DealProvider) handleNewAskStream(s network.Stream) {
+	start := time.Now()
+	reqLog := log.With("client-peer", s.Conn().RemotePeer())
+	reqLog.Debugw("new queryAsk request")
+
+	defer func() {
+		err := s.Close()
+		if err != nil {
+			reqLog.Infow("closing stream", "err", err)
+		}
+		reqLog.Debugw("handled queryAsk request", "duration", time.Since(start).String())
+	}()
+
+	// Read the query ask request from the stream
+	_ = s.SetReadDeadline(time.Now().Add(providerReadDeadline))
+	var req gfm_network.AskRequest
+	err := req.UnmarshalCBOR(s)
+	_ = s.SetReadDeadline(time.Time{}) // Clear read deadline so conn doesn't get closed
+	if err != nil {
+		reqLog.Warnw("reading queryAsk request from stream", "err", err)
+		return
+	}
+
+	var resp gfm_network.AskResponse
+
+	if req.Miner.String() == p.prov.Address.String() {
+		resp.Ask = p.prov.GetAsk()
+	} else {
+		reqLog.Warnf("storage provider for address %s received ask for miner with address %s", p.prov.Address, req.Miner)
+	}
+
+	// Set a deadline on writing to the stream so it doesn't hang
+	_ = s.SetWriteDeadline(time.Now().Add(providerWriteDeadline))
+	defer s.SetWriteDeadline(time.Time{}) // nolint
+
+	if err := cborutil.WriteCborRPC(s, &resp); err != nil {
+		reqLog.Errorw("failed to write queryAsk response", "err", err)
+	}
+}
+
+func (p *DealProvider) handleOldAskStream(s network.Stream) {
+	start := time.Now()
+	reqLog := log.With("client-peer", s.Conn().RemotePeer())
+	reqLog.Debugw("new queryAsk request")
+
+	defer func() {
+		err := s.Close()
+		if err != nil {
+			reqLog.Infow("closing stream", "err", err)
+		}
+		reqLog.Debugw("handled queryAsk request", "duration", time.Since(start).String())
+	}()
+
+	// Read the query ask request from the stream
+	_ = s.SetReadDeadline(time.Now().Add(providerReadDeadline))
+	var req mig.AskRequest0
+	err := req.UnmarshalCBOR(s)
+	_ = s.SetReadDeadline(time.Time{}) // Clear read deadline so conn doesn't get closed
+	if err != nil {
+		reqLog.Warnw("reading queryAsk request from stream", "err", err)
+		return
+	}
+
+	var resp mig.AskResponse0
+
+	if req.Miner.String() == p.prov.Address.String() {
+		ask := p.prov.GetAsk()
+
+		newAsk := ask.Ask
+		resp.Ask.Ask = &mig.StorageAsk0{
+			Price:         newAsk.Price,
+			VerifiedPrice: newAsk.VerifiedPrice,
+			MinPieceSize:  newAsk.MinPieceSize,
+			MaxPieceSize:  newAsk.MaxPieceSize,
+			Miner:         newAsk.Miner,
+			Timestamp:     newAsk.Timestamp,
+			Expiry:        newAsk.Expiry,
+			SeqNo:         newAsk.SeqNo,
+		}
+		oldSig, err := p.signLegacyResponse(&resp)
+		if err != nil {
+			reqLog.Errorf("getting signed response: %s", err)
+		}
+
+		resp.Ask.Signature = oldSig
+	} else {
+		reqLog.Warnf("storage provider for address %s received ask for miner with address %s", p.prov.Address, req.Miner)
+	}
+
+	// Set a deadline on
writing to the stream so it doesn't hang + _ = s.SetWriteDeadline(time.Now().Add(providerWriteDeadline)) + defer s.SetWriteDeadline(time.Time{}) // nolint + + if err := cborutil.WriteCborRPC(s, &resp); err != nil { + reqLog.Errorw("failed to write queryAsk response", "err", err) + } +} From f4b911f9823cfc38ab88236b590bd01b6c1f5126 Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Tue, 19 Dec 2023 23:29:56 +0400 Subject: [PATCH 26/34] cleanup config --- cmd/boostd/init.go | 521 -------------------- cmd/boostd/main.go | 3 - cmd/boostd/piecedir.go | 1 + cmd/booster-http/e2e_test.go | 2 +- cmd/booster-http/trustless_gateway_test.go | 2 +- gql/resolver_dealpublish.go | 2 +- gql/resolver_legacy_storage.go | 35 -- gql/resolver_rtvllog.go | 2 +- gql/resolver_transfers.go | 2 +- gql/schema.graphql | 9 - indexprovider/wrapper.go | 20 +- itests/data_segment_index_retrieval_test.go | 1 - itests/dummydeal_offline_test.go | 1 - itests/dummydeal_podsi_test.go | 4 +- itests/dummydeal_test.go | 1 - itests/framework/framework.go | 18 +- itests/graphsync_identity_cid_test.go | 1 - itests/graphsync_retrieval_test.go | 1 - itests/ipni_publish_test.go | 1 - itests/shared/multiminer.go | 4 +- markets/storageadapter/dealpublisher.go | 10 +- node/builder.go | 32 +- node/config/def.go | 134 ++--- node/config/doc_gen.go | 371 +++++--------- node/config/types.go | 237 ++++----- node/modules/graphsync.go | 10 +- node/modules/piecedirectory.go | 2 +- node/modules/retrieval.go | 20 +- node/modules/storageminer.go | 8 +- react/src/StorageSpace.js | 58 +-- react/src/gql.js | 11 - storagemarket/lp2pimpl/net.go | 26 +- 32 files changed, 335 insertions(+), 1215 deletions(-) delete mode 100644 gql/resolver_legacy_storage.go diff --git a/cmd/boostd/init.go b/cmd/boostd/init.go index a3563ed2a..b61408cbd 100644 --- a/cmd/boostd/init.go +++ b/cmd/boostd/init.go @@ -1,36 +1,24 @@ package main import ( - "bufio" "context" "errors" "fmt" - "io" "os" - "os/exec" "path" "strings" - "github.com/chzyer/readline" - "github.com/dustin/go-humanize" - "github.com/filecoin-project/boost/api" cliutil "github.com/filecoin-project/boost/cli/util" scliutil "github.com/filecoin-project/boost/extern/boostd-data/shared/cliutil" "github.com/filecoin-project/boost/node/config" - "github.com/filecoin-project/boost/node/impl/backupmgr" "github.com/filecoin-project/boost/node/repo" - "github.com/filecoin-project/boost/util" "github.com/filecoin-project/go-address" lapi "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/client" "github.com/filecoin-project/lotus/api/v0api" lcli "github.com/filecoin-project/lotus/cli" - lotus_config "github.com/filecoin-project/lotus/node/config" - lotus_modules "github.com/filecoin-project/lotus/node/modules" lotus_repo "github.com/filecoin-project/lotus/node/repo" - "github.com/gbrlsnchs/jwt/v3" "github.com/ipfs/go-datastore" - dsq "github.com/ipfs/go-datastore/query" "github.com/urfave/cli/v2" ) @@ -151,395 +139,6 @@ var initCmd = &cli.Command{ }, } -var migrateFlags = []cli.Flag{ - &cli.StringFlag{ - Name: "wallet-publish-storage-deals", - Usage: "wallet to be used for PublishStorageDeals messages", - Required: true, - }, - &cli.StringFlag{ - Name: "wallet-deal-collateral", - Usage: "wallet to be used for deal collateral", - Required: true, - }, - &cli.Int64Flag{ - Name: "max-staging-deals-bytes", - Usage: "max size for staging area in bytes", - Required: true, - }, -} - -var migrateMarketsCmd = &cli.Command{ - Name: "migrate-markets", - Usage: "Migrate from an existing split markets (MRA) 
repo to Boost", - Flags: append([]cli.Flag{ - &cli.StringFlag{ - Name: "import-markets-repo", - Usage: "initialize boost from an existing split markets (MRA) repo", - Required: true, - }}, - migrateFlags..., - ), - Before: before, - Action: func(cctx *cli.Context) error { - return migrate(cctx, false, cctx.String("import-markets-repo")) - }, -} - -var migrateMonolithCmd = &cli.Command{ - Name: "migrate-monolith", - Usage: "Migrate from an existing monolith lotus-miner repo to Boost", - Flags: append([]cli.Flag{ - &cli.StringFlag{ - Name: "import-miner-repo", - Usage: "initialize boost from an existing monolith lotus-miner repo", - Required: true, - }}, - append(minerApiFlags, migrateFlags...)..., - ), - Before: before, - Action: func(cctx *cli.Context) error { - return migrate(cctx, true, cctx.String("import-miner-repo")) - }, -} - -func migrate(cctx *cli.Context, fromMonolith bool, mktsRepoPath string) error { - ctx := scliutil.ReqContext(cctx) - - // Open markets repo - fmt.Printf("Opening repo '%s'\n", mktsRepoPath) - mktsRepo, err := getMarketsRepo(mktsRepoPath) - if err != nil { - return err - } - defer mktsRepo.Close() //nolint:errcheck - - // Initialize boost repo - bp, err := initBoost(ctx, cctx, mktsRepo) - if err != nil { - return err - } - - boostRepo, err := bp.repo.Lock(repo.Boost) - if err != nil { - return err - } - defer boostRepo.Close() - - ds, err := boostRepo.Datastore(context.Background(), metadataNamespace) - if err != nil { - return err - } - - // Migrate datastore keys - fmt.Println("Migrating datastore keys") - err = migrateMarketsDatastore(ctx, ds, mktsRepo) - if err != nil { - return err - } - - // Migrate keystore - fmt.Println("Migrating keystore") - err = backupmgr.CopyKeysBetweenRepos(mktsRepo, boostRepo) - if err != nil { - return err - } - - // Migrate config - fmt.Println("Migrating markets config") - err = migrateMarketsConfig(cctx, mktsRepo, boostRepo, bp, fromMonolith) - if err != nil { - return err - } - - // Add the miner address to the metadata datastore - fmt.Printf("Adding miner address %s to datastore\n", bp.minerActor) - err = addMinerAddressToDatastore(ds, bp.minerActor) - if err != nil { - return err - } - - // Copy the storage.json file if there is one, otherwise create an empty one - err = migrateStorageJson(mktsRepo.Path(), boostRepo.Path()) - if err != nil { - return err - } - - // Create an auth token - err = createAuthToken(bp.repo, boostRepo) - if err != nil { - return err - } - - // Migrate DAG store - err = migrateDAGStore(ctx, mktsRepo, boostRepo) - if err != nil { - return err - } - - fmt.Println("Boost repo successfully created at " + boostRepo.Path()) - fmt.Println("You can now start boost with 'boostd -vv run'") - - return nil -} - -func migrateStorageJson(mktsRepoPath string, boostRepoPath string) error { - mktsFilePath := path.Join(mktsRepoPath, "storage.json") - boostFilePath := path.Join(boostRepoPath, "storage.json") - - // Read storage.json in the markets repo - bz, err := os.ReadFile(mktsFilePath) - if err != nil { - if !errors.Is(err, os.ErrNotExist) { - return fmt.Errorf("reading %s: %w", mktsFilePath, err) - } - - // There is no storage.json in the markets repo, so create an empty one - // in the Boost repo - fmt.Println("Creating storage.json file") - bz = []byte("{}") - } else { - fmt.Println("Migrating storage.json file") - } - - // Write storage.json in the boost repo - err = os.WriteFile(boostFilePath, bz, 0666) - if err != nil { - return fmt.Errorf("writing %s: %w", boostFilePath, err) - } - - return nil -} - -func 
createAuthToken(boostRepo *lotus_repo.FsRepo, boostRepoLocked lotus_repo.LockedRepo) error { - ks, err := boostRepoLocked.KeyStore() - if err != nil { - return fmt.Errorf("getting boost keystore: %w", err) - } - - // Set up the API secret key in the keystore - _, err = lotus_modules.APISecret(ks, boostRepoLocked) - if err != nil { - return fmt.Errorf("generating API token: %w", err) - } - - // Get the API token from the repo - _, err = boostRepo.APIToken() - if err == nil { - // If the token already exists, nothing more to do - return nil - } - - // Check if the error was because the token has not been created (expected) - // or for some other reason - if !errors.Is(err, lotus_repo.ErrNoAPIEndpoint) { - return fmt.Errorf("getting API token for newly created boost repo: %w", err) - } - - // The token does not exist, so create a new token - p := lotus_modules.JwtPayload{ - Allow: api.AllPermissions, - } - - // Get the API secret key - key, err := ks.Get(lotus_modules.JWTSecretName) - if err != nil { - // This should never happen because it gets created by the APISecret - // function above - return fmt.Errorf("getting key %s from keystore to generate API token: %w", - lotus_modules.JWTSecretName, err) - } - - // Create the API token - cliToken, err := jwt.Sign(&p, jwt.NewHS256(key.PrivateKey)) - if err != nil { - return fmt.Errorf("signing JSW payload for API token: %w", err) - } - - // Save the API token in the repo - err = boostRepoLocked.SetAPIToken(cliToken) - if err != nil { - return fmt.Errorf("setting boost API token: %w", err) - } - - return nil -} - -func migrateDAGStore(ctx context.Context, mktsRepo lotus_repo.LockedRepo, boostRepo lotus_repo.LockedRepo) error { - - subdir := "dagstore" - - mktsSubdirPath := path.Join(mktsRepo.Path(), subdir) - boostSubdirPath := path.Join(boostRepo.Path(), subdir) - - rawMktsCfg, err := mktsRepo.Config() - if err != nil { - return fmt.Errorf("getting markets repo config: %w", err) - } - mktsCfg, ok := rawMktsCfg.(*lotus_config.StorageMiner) - if !ok { - return fmt.Errorf("expected legacy markets config, got %T", rawMktsCfg) - } - - if len(mktsCfg.DAGStore.RootDir) > 0 { - fmt.Println("Not migrating the dagstore as a custom dagstore path is set. 
Please manually move or copy the dagstore to $BOOST_PATH/dagstore") - return nil - } - - dirInfo, err := os.Lstat(mktsSubdirPath) - if err != nil { - if errors.Is(err, os.ErrNotExist) { - return nil - } - return fmt.Errorf("reading %s path %s", subdir, mktsSubdirPath) - } - - // If it's a sym-link just copy the sym-link - if dirInfo.Mode()&os.ModeSymlink == os.ModeSymlink { - fmt.Printf("copying sym-link %s to %s\n", mktsSubdirPath, boostSubdirPath) - cmd := exec.Command("cp", "-a", mktsSubdirPath, boostSubdirPath) - err = cmd.Run() - if err != nil { - return fmt.Errorf("Copying sym-link %s %s to %s: %w", subdir, mktsSubdirPath, boostSubdirPath, err) - } - return nil - } - - if !dirInfo.IsDir() { - return fmt.Errorf("expected %s to be a directory but it's not", mktsSubdirPath) - } - - dirSizeBytes, err := util.DirSize(mktsSubdirPath) - if err != nil { - return fmt.Errorf("getting size of %s: %w", mktsSubdirPath, err) - } - - humanSize := humanize.Bytes(uint64(dirSizeBytes)) - fmt.Printf("%s directory size: %s\n", subdir, humanSize) - - // If the directory is small enough, just copy it - if dirSizeBytes < 1024 { - fmt.Printf("Copying %s to %s\n", mktsSubdirPath, boostSubdirPath) - cmd := exec.Command("cp", "-r", mktsSubdirPath, boostSubdirPath) - err = cmd.Run() - if err != nil { - return fmt.Errorf("Copying %s directory %s to %s: %w", subdir, mktsSubdirPath, boostSubdirPath, err) - } - return nil - } - - cs := readline.NewCancelableStdin(os.Stdin) - go func() { - <-ctx.Done() - cs.Close() // nolint:errcheck - }() - rl := bufio.NewReader(cs) - for { - fmt.Printf("%s directory size is %s. Copy [c] / Move [m] / Ignore [i]:\n", subdir, humanSize) - - line, _, err := rl.ReadLine() - if err != nil { - if errors.Is(err, io.EOF) { - return fmt.Errorf("boost initialize canceled: %w", err) - } - - return fmt.Errorf("reading input: %w", err) - } - - switch string(line) { - case "c", "y": - fmt.Printf("Copying %s to %s\n", mktsSubdirPath, boostSubdirPath) - cmd := exec.Command("cp", "-r", mktsSubdirPath, boostSubdirPath) - err = cmd.Run() - if err != nil { - return fmt.Errorf("Copying %s directory %s to %s: %w", subdir, mktsSubdirPath, boostSubdirPath, err) - } - return nil - case "m": - fmt.Printf("Moving %s to %s\n", mktsSubdirPath, boostSubdirPath) - cmd := exec.Command("mv", mktsSubdirPath, boostSubdirPath) - err = cmd.Run() - if err != nil { - return fmt.Errorf("Moving %s directory %s to %s: %w", subdir, mktsSubdirPath, boostSubdirPath, err) - } - return nil - case "i": - fmt.Printf("Not copying %s directory from markets to boost\n", subdir) - return nil - } - } -} - -func migrateMarketsConfig(cctx *cli.Context, mktsRepo lotus_repo.LockedRepo, boostRepo lotus_repo.LockedRepo, bp *boostParams, fromMonolith bool) error { - var cerr error - err := boostRepo.SetConfig(func(raw interface{}) { - rcfg, ok := raw.(*config.Boost) - if !ok { - cerr = errors.New("expected boost config") - return - } - - rawMktsCfg, err := mktsRepo.Config() - if err != nil { - cerr = fmt.Errorf("getting markets repo config: %w", err) - return - } - mktsCfg, ok := rawMktsCfg.(*lotus_config.StorageMiner) - if !ok { - cerr = fmt.Errorf("expected legacy markets config, got %T", rawMktsCfg) - return - } - - if !fromMonolith { - // When migrating from a split markets process, copy across the API - // listen address because we're going to replace the split markets - // process with boost. - // (When migrating from a monolith leave the defaults which are - // different from lotus miner, so they won't clash). 
- rcfg.Common.API = mktsCfg.Common.API - } - rcfg.Common.Backup = mktsCfg.Common.Backup - rcfg.Common.Libp2p = mktsCfg.Common.Libp2p - rcfg.Storage.ParallelFetchLimit = mktsCfg.Storage.ParallelFetchLimit - setBoostDealMakingCfg(&rcfg.Dealmaking, mktsCfg) - rcfg.LotusDealmaking = mktsCfg.Dealmaking - rcfg.LotusFees.MaxMarketBalanceAddFee = mktsCfg.Fees.MaxMarketBalanceAddFee - rcfg.LotusFees.MaxPublishDealsFee = mktsCfg.Fees.MaxPublishDealsFee - rcfg.DAGStore = mktsCfg.DAGStore - // Clear the DAG store root dir config, because the DAG store is no longer configurable in Boost - // (it is always at /dagstore - rcfg.DAGStore.RootDir = "" - rcfg.IndexProvider.EntriesCacheCapacity = mktsCfg.IndexProvider.EntriesCacheCapacity - rcfg.IndexProvider.EntriesChunkSize = mktsCfg.IndexProvider.EntriesChunkSize - rcfg.IndexProvider.TopicName = mktsCfg.IndexProvider.TopicName - rcfg.IndexProvider.PurgeCacheOnStart = mktsCfg.IndexProvider.PurgeCacheOnStart - rcfg.IndexProvider.Enable = true // Enable index provider in Boost by default - - if fromMonolith { - // If migrating from a monolith miner, read the sealing and - // indexing endpoints from the command line parameters - cerr = setMinerApiConfig(cctx, rcfg, false) - if cerr != nil { - return - } - } else { - // If migrating from a split markets process, just copy across - // the sealing and indexing endpoints. - rcfg.SealerApiInfo = mktsCfg.Subsystems.SealerApiInfo - rcfg.SectorIndexApiInfo = mktsCfg.Subsystems.SectorIndexApiInfo - } - setCommonConfig(cctx, rcfg, bp) - }) - if cerr != nil { - return cerr - } - if err != nil { - return fmt.Errorf("setting config: %w", err) - } - - return nil -} - type boostParams struct { repo *lotus_repo.FsRepo minerActor address.Address @@ -703,126 +302,6 @@ func addMinerAddressToDatastore(ds datastore.Batching, minerActor address.Addres return ds.Put(context.Background(), minerAddrDSKey, minerActor.Bytes()) } -func setBoostDealMakingCfg(bdm *config.DealmakingConfig, mktsCfg *lotus_config.StorageMiner) { - ldm := mktsCfg.Dealmaking - bdm.ConsiderOnlineStorageDeals = ldm.ConsiderOnlineStorageDeals - bdm.ConsiderOfflineStorageDeals = ldm.ConsiderOfflineStorageDeals - bdm.ConsiderOnlineRetrievalDeals = ldm.ConsiderOnlineRetrievalDeals - bdm.ConsiderOfflineRetrievalDeals = ldm.ConsiderOfflineRetrievalDeals - bdm.ConsiderVerifiedStorageDeals = ldm.ConsiderVerifiedStorageDeals - bdm.ConsiderUnverifiedStorageDeals = ldm.ConsiderUnverifiedStorageDeals - bdm.PieceCidBlocklist = ldm.PieceCidBlocklist - bdm.ExpectedSealDuration = config.Duration(ldm.ExpectedSealDuration) - bdm.MaxDealStartDelay = config.Duration(ldm.MaxDealStartDelay) - bdm.MaxProviderCollateralMultiplier = ldm.MaxProviderCollateralMultiplier - bdm.MaxStagingDealsBytes = ldm.MaxStagingDealsBytes - bdm.StartEpochSealingBuffer = ldm.StartEpochSealingBuffer - bdm.Filter = ldm.Filter - bdm.RetrievalFilter = ldm.RetrievalFilter - bdm.RetrievalPricing = ldm.RetrievalPricing -} - -func getMarketsRepo(repoPath string) (lotus_repo.LockedRepo, error) { - // Open the repo at the repo path - mktsRepo, err := lotus_repo.NewFS(repoPath) - if err != nil { - return nil, fmt.Errorf("opening legacy markets repo %s: %w", repoPath, err) - } - - // Make sure the repo exists - exists, err := mktsRepo.Exists() - if err != nil { - return nil, fmt.Errorf("checking legacy markets repo %s exists: %w", repoPath, err) - } - if !exists { - return nil, fmt.Errorf("legacy markets repo %s does not exist", repoPath) - } - - // Lock the repo - lr, err := mktsRepo.LockRO(lotus_repo.StorageMiner) - 
if err != nil { - return nil, fmt.Errorf("locking legacy markets repo %s: %w", repoPath, err) - } - return lr, nil -} - -func migrateMarketsDatastore(ctx context.Context, boostDS datastore.Batching, mktsRepo lotus_repo.LockedRepo) error { - // Open the metadata datastore on the repo - mktsDS, err := mktsRepo.Datastore(ctx, metadataNamespace) - if err != nil { - return fmt.Errorf("opening datastore %s on legacy markets repo %s: %w", - metadataNamespace, mktsRepo.Path(), err) - } - - // Import the key / values from the markets metadata datastore - prefixes := []string{ - // Storage deals - "/deals/provider", - // Retrieval deals - "/retrievals/provider", - // Piece store - "/storagemarket", - } - for _, prefix := range prefixes { - err := importPrefix(ctx, prefix, mktsDS, boostDS) - if err != nil { - return err - } - } - - return nil -} - -func importPrefix(ctx context.Context, prefix string, mktsDS datastore.Batching, boostDS datastore.Batching) error { - fmt.Printf("Importing all legacy markets datastore keys under %s\n", prefix) - - q, err := mktsDS.Query(ctx, dsq.Query{ - Prefix: prefix, - }) - if err != nil { - return fmt.Errorf("legacy markets datastore query: %w", err) - } - defer q.Close() //nolint:errcheck - - // Import keys in batches - totalCount := 0 - batchSize := 1024 - results := q.Next() - for { - batch, err := boostDS.Batch(ctx) - if err != nil { - return fmt.Errorf("creating boost datastore batch: %w", err) - } - - complete := false - count := 0 - for ; count < batchSize; count++ { - res, ok := <-results - if !ok { - complete = true - break - } - - err := batch.Put(ctx, datastore.NewKey(res.Key), res.Value) - if err != nil { - return fmt.Errorf("putting %s to Boost datastore: %w", res.Key, err) - } - } - - fmt.Printf("Importing %d legacy markets datastore keys\n", count) - err = batch.Commit(ctx) - if err != nil { - return fmt.Errorf("saving %d datastore keys to Boost datastore: %w", count, err) - } - - totalCount += count - if complete { - fmt.Printf("Imported %d legacy markets datastore keys under %s\n", totalCount, prefix) - return nil - } - } -} - // checkV1ApiSupport uses v0 api version to signal support for v1 API // trying to query the v1 api on older lotus versions would get a 404, which can happen for any number of other reasons func checkV1ApiSupport(ctx context.Context, cctx *cli.Context) error { diff --git a/cmd/boostd/main.go b/cmd/boostd/main.go index 6cfb61737..ac9e51594 100644 --- a/cmd/boostd/main.go +++ b/cmd/boostd/main.go @@ -36,8 +36,6 @@ func main() { authCmd, runCmd, initCmd, - migrateMonolithCmd, - migrateMarketsCmd, backupCmd, restoreCmd, configCmd, @@ -48,7 +46,6 @@ func main() { logCmd, netCmd, pieceDirCmd, - recoverCmd, }, } app.Setup() diff --git a/cmd/boostd/piecedir.go b/cmd/boostd/piecedir.go index 54436b0aa..08854b6aa 100644 --- a/cmd/boostd/piecedir.go +++ b/cmd/boostd/piecedir.go @@ -16,6 +16,7 @@ var pieceDirCmd = &cli.Command{ Usage: "Manage Local Index Directory", Subcommands: []*cli.Command{ pdIndexGenerate, + recoverCmd, }, } diff --git a/cmd/booster-http/e2e_test.go b/cmd/booster-http/e2e_test.go index 24f2076ec..34094df1b 100644 --- a/cmd/booster-http/e2e_test.go +++ b/cmd/booster-http/e2e_test.go @@ -40,7 +40,7 @@ func TestE2E(t *testing.T) { framework.SetLogLevel() t.Log("Starting boost and miner") - boostAndMiner := framework.NewTestFramework(ctx, t, framework.EnableLegacyDeals(true), framework.SetMaxStagingBytes(10485760)) + boostAndMiner := framework.NewTestFramework(ctx, t, framework.SetMaxStagingBytes(10485760)) 
req.NoError(boostAndMiner.Start()) defer boostAndMiner.Stop() diff --git a/cmd/booster-http/trustless_gateway_test.go b/cmd/booster-http/trustless_gateway_test.go index 49864f286..2d4bea622 100644 --- a/cmd/booster-http/trustless_gateway_test.go +++ b/cmd/booster-http/trustless_gateway_test.go @@ -31,7 +31,7 @@ func TestTrustlessGateway(t *testing.T) { kit.QuietMiningLogs() framework.SetLogLevel() - boostAndMiner := framework.NewTestFramework(ctx, t, framework.EnableLegacyDeals(true), framework.SetMaxStagingBytes(10485760)) + boostAndMiner := framework.NewTestFramework(ctx, t, framework.SetMaxStagingBytes(10485760)) req.NoError(boostAndMiner.Start()) defer boostAndMiner.Stop() diff --git a/gql/resolver_dealpublish.go b/gql/resolver_dealpublish.go index 78cffc48b..e28cb4e85 100644 --- a/gql/resolver_dealpublish.go +++ b/gql/resolver_dealpublish.go @@ -158,7 +158,7 @@ func (r *resolver) DealPublish(ctx context.Context) (*dealPublishResolver, error Deals: basicDeals, Period: int32(pending.PublishPeriod.Seconds()), Start: graphql.Time{Time: pending.PublishPeriodStart}, - MaxDealsPerMsg: int32(r.cfg.LotusDealmaking.MaxDealsPerPublishMsg), + MaxDealsPerMsg: int32(r.cfg.Dealpublish.MaxDealsPerPublishMsg), }, nil } diff --git a/gql/resolver_legacy_storage.go b/gql/resolver_legacy_storage.go deleted file mode 100644 index 623decd15..000000000 --- a/gql/resolver_legacy_storage.go +++ /dev/null @@ -1,35 +0,0 @@ -package gql - -import ( - "errors" - "os" - "path" - - gqltypes "github.com/filecoin-project/boost/gql/types" - "github.com/filecoin-project/boost/util" - lotus_modules "github.com/filecoin-project/lotus/node/modules" -) - -type legacyStorageResolver struct { - Capacity gqltypes.Uint64 - Used gqltypes.Uint64 - MountPoint string -} - -// query: legacyStorage: [LegacyStorage] -func (r *resolver) LegacyStorage() (*legacyStorageResolver, error) { - stagingDir := path.Join(r.repo.Path(), lotus_modules.StagingAreaDirName) - used, err := util.DirSize(stagingDir) - if err != nil { - if !errors.Is(err, os.ErrNotExist) { - return nil, err - } - used = 0 - } - - return &legacyStorageResolver{ - Capacity: gqltypes.Uint64(r.cfg.LotusDealmaking.MaxStagingDealsBytes), - Used: gqltypes.Uint64(used), - MountPoint: stagingDir, - }, nil -} diff --git a/gql/resolver_rtvllog.go b/gql/resolver_rtvllog.go index 2f38d7a38..6fd73f95e 100644 --- a/gql/resolver_rtvllog.go +++ b/gql/resolver_rtvllog.go @@ -212,6 +212,6 @@ func (r *resolver) RetrievalLogsCount(ctx context.Context, args struct{ IsIndexe count, err := r.retDB.Count(ctx, isIndexer) return &retStateCount{ Count: int32(count), - Period: gqltypes.Uint64(r.cfg.Dealmaking.RetrievalLogDuration), + Period: gqltypes.Uint64(r.cfg.Retrievals.GraphsyncRetrievalConfig.RetrievalLogDuration), }, err } diff --git a/gql/resolver_transfers.go b/gql/resolver_transfers.go index e7a20f306..ce1b3dd20 100644 --- a/gql/resolver_transfers.go +++ b/gql/resolver_transfers.go @@ -49,7 +49,7 @@ func (r *resolver) TransferStats(_ context.Context) *transferStats { }) } return &transferStats{ - HttpMaxConcurrentDownloads: int32(r.cfg.Dealmaking.HttpTransferMaxConcurrentDownloads), + HttpMaxConcurrentDownloads: int32(r.cfg.HttpDownload.HttpTransferMaxConcurrentDownloads), Stats: gqlStats, } } diff --git a/gql/schema.graphql b/gql/schema.graphql index e1d8d60d4..93e38da5f 100644 --- a/gql/schema.graphql +++ b/gql/schema.graphql @@ -305,12 +305,6 @@ type Storage { MountPoint: String! } -type LegacyStorage { - Capacity: Uint64! - Used: Uint64! - MountPoint: String! 
-} - type WaitDeal { ID: ID! Size: Uint64! @@ -597,9 +591,6 @@ type RootQuery { """Get storage space usage""" storage: Storage! - """Get storage space usage of deals made with legacy markets endpoint""" - legacyStorage: LegacyStorage! - """Get sealing pipeline state""" sealingpipeline: SealingPipeline! diff --git a/indexprovider/wrapper.go b/indexprovider/wrapper.go index c04eff7fe..fd3a837a4 100644 --- a/indexprovider/wrapper.go +++ b/indexprovider/wrapper.go @@ -83,9 +83,9 @@ func NewWrapper(cfg *config.Boost) func(lc fx.Lifecycle, h host.Host, r repo.Loc _, isDisabled := prov.(*DisabledIndexProvider) // bitswap is enabled if there is a bitswap peer id - bitswapEnabled := cfg.Dealmaking.BitswapPeerID != "" + bitswapEnabled := cfg.Retrievals.BitswapRetrievalConfig.BitswapPeerID != "" // http is considered enabled if there is an http retrieval multiaddr set - httpEnabled := cfg.Dealmaking.HTTPRetrievalMultiaddr != "" + httpEnabled := cfg.Retrievals.HTTPRetrievalConfig.HTTPRetrievalMultiaddr != "" // setup bitswap extended provider if there is a public multi addr for bitswap w := &Wrapper{ @@ -375,26 +375,26 @@ func (w *Wrapper) appendExtendedProviders(ctx context.Context, adBuilder *xprovi return err } var ep xproviders.Info - if len(w.cfg.Dealmaking.BitswapPublicAddresses) > 0 { - if w.cfg.Dealmaking.BitswapPrivKeyFile == "" { + if len(w.cfg.Retrievals.BitswapRetrievalConfig.BitswapPublicAddresses) > 0 { + if w.cfg.Retrievals.BitswapRetrievalConfig.BitswapPrivKeyFile == "" { return fmt.Errorf("missing required configuration key BitswapPrivKeyFile: " + "boost is configured with BitswapPublicAddresses but the BitswapPrivKeyFile configuration key is empty") } // we need the private key for bitswaps peerID in order to announce publicly - keyFile, err := os.ReadFile(w.cfg.Dealmaking.BitswapPrivKeyFile) + keyFile, err := os.ReadFile(w.cfg.Retrievals.BitswapRetrievalConfig.BitswapPrivKeyFile) if err != nil { - return fmt.Errorf("opening BitswapPrivKeyFile %s: %w", w.cfg.Dealmaking.BitswapPrivKeyFile, err) + return fmt.Errorf("opening BitswapPrivKeyFile %s: %w", w.cfg.Retrievals.BitswapRetrievalConfig.BitswapPrivKeyFile, err) } privKey, err := crypto.UnmarshalPrivateKey(keyFile) if err != nil { - return fmt.Errorf("unmarshalling BitswapPrivKeyFile %s: %w", w.cfg.Dealmaking.BitswapPrivKeyFile, err) + return fmt.Errorf("unmarshalling BitswapPrivKeyFile %s: %w", w.cfg.Retrievals.BitswapRetrievalConfig.BitswapPrivKeyFile, err) } // setup an extended provider record, containing the booster-bitswap multi addr, // peer ID, private key for signing, and metadata ep = xproviders.Info{ - ID: w.cfg.Dealmaking.BitswapPeerID, - Addrs: w.cfg.Dealmaking.BitswapPublicAddresses, + ID: w.cfg.Retrievals.BitswapRetrievalConfig.BitswapPeerID, + Addrs: w.cfg.Retrievals.BitswapRetrievalConfig.BitswapPublicAddresses, Priv: privKey, Metadata: mbytes, } @@ -432,7 +432,7 @@ func (w *Wrapper) appendExtendedProviders(ctx context.Context, adBuilder *xprovi } var ep = xproviders.Info{ ID: w.h.ID().String(), - Addrs: []string{w.cfg.Dealmaking.HTTPRetrievalMultiaddr}, + Addrs: []string{w.cfg.Retrievals.HTTPRetrievalConfig.HTTPRetrievalMultiaddr}, Metadata: mbytes, Priv: key, } diff --git a/itests/data_segment_index_retrieval_test.go b/itests/data_segment_index_retrieval_test.go index ed03c2705..e826aba5c 100644 --- a/itests/data_segment_index_retrieval_test.go +++ b/itests/data_segment_index_retrieval_test.go @@ -24,7 +24,6 @@ func TestDataSegmentIndexRetrieval(t *testing.T) { kit.QuietMiningLogs() framework.SetLogLevel() var 
opts []framework.FrameworkOpts - opts = append(opts, framework.EnableLegacyDeals(true)) opts = append(opts, framework.SetMaxStagingBytes(10000000)) // 10 MB f := framework.NewTestFramework(ctx, t, opts...) err := f.Start() diff --git a/itests/dummydeal_offline_test.go b/itests/dummydeal_offline_test.go index fb11e06f6..0327fccab 100644 --- a/itests/dummydeal_offline_test.go +++ b/itests/dummydeal_offline_test.go @@ -19,7 +19,6 @@ func TestDummydealOffline(t *testing.T) { kit.QuietMiningLogs() framework.SetLogLevel() var opts []framework.FrameworkOpts - opts = append(opts, framework.EnableLegacyDeals(true)) f := framework.NewTestFramework(ctx, t, opts...) err := f.Start() require.NoError(t, err) diff --git a/itests/dummydeal_podsi_test.go b/itests/dummydeal_podsi_test.go index 2a9a3ccb6..a1216e78e 100644 --- a/itests/dummydeal_podsi_test.go +++ b/itests/dummydeal_podsi_test.go @@ -30,7 +30,7 @@ import ( "github.com/ipld/go-ipld-prime" cidlink "github.com/ipld/go-ipld-prime/linking/cid" "github.com/multiformats/go-multicodec" - multihash "github.com/multiformats/go-multihash" + "github.com/multiformats/go-multihash" "github.com/stretchr/testify/require" ) @@ -43,7 +43,7 @@ func TestDummyPodsiDealOnline(t *testing.T) { kit.QuietMiningLogs() framework.SetLogLevel() var opts []framework.FrameworkOpts - opts = append(opts, framework.EnableLegacyDeals(true), framework.SetMaxStagingBytes(10e9), framework.SetProvisionalWalletBalances(9e18)) + opts = append(opts, framework.SetMaxStagingBytes(10e9), framework.SetProvisionalWalletBalances(9e18)) f := framework.NewTestFramework(ctx, t, opts...) err := f.Start() require.NoError(t, err) diff --git a/itests/dummydeal_test.go b/itests/dummydeal_test.go index b24ea3c10..607882157 100644 --- a/itests/dummydeal_test.go +++ b/itests/dummydeal_test.go @@ -23,7 +23,6 @@ func TestDummydealOnline(t *testing.T) { kit.QuietMiningLogs() framework.SetLogLevel() var opts []framework.FrameworkOpts - opts = append(opts, framework.EnableLegacyDeals(true)) f := framework.NewTestFramework(ctx, t, opts...) 
err := f.Start() require.NoError(t, err) diff --git a/itests/framework/framework.go b/itests/framework/framework.go index 09f8db7d3..439bd712a 100644 --- a/itests/framework/framework.go +++ b/itests/framework/framework.go @@ -97,7 +97,6 @@ var Log = logging.Logger("boosttest") type TestFrameworkConfig struct { Ensemble *kit.Ensemble - EnableLegacy bool MaxStagingBytes int64 ProvisionalWalletBalances int64 } @@ -119,12 +118,6 @@ type TestFramework struct { type FrameworkOpts func(pc *TestFrameworkConfig) -func EnableLegacyDeals(enable bool) FrameworkOpts { - return func(tmc *TestFrameworkConfig) { - tmc.EnableLegacy = enable - } -} - func SetMaxStagingBytes(max int64) FrameworkOpts { return func(tmc *TestFrameworkConfig) { tmc.MaxStagingBytes = max @@ -362,21 +355,18 @@ func (f *TestFramework) Start(opts ...ConfigOpt) error { cfg.Wallets.Miner = minerAddr.String() cfg.Wallets.PublishStorageDeals = psdWalletAddr.String() cfg.Wallets.DealCollateral = dealCollatAddr.String() - cfg.LotusDealmaking.MaxDealsPerPublishMsg = 1 - cfg.LotusDealmaking.PublishMsgPeriod = lotus_config.Duration(0) + cfg.Dealpublish.MaxDealsPerPublishMsg = 1 + cfg.Dealpublish.PublishMsgPeriod = config.Duration(0) val, err := ltypes.ParseFIL("0.1 FIL") if err != nil { return err } - cfg.LotusFees.MaxPublishDealsFee = val + cfg.Dealpublish.MaxPublishDealsFee = val cfg.Dealmaking.RemoteCommp = true // No transfers will start until the first stall check period has elapsed - cfg.Dealmaking.HttpTransferStallCheckPeriod = config.Duration(100 * time.Millisecond) + cfg.HttpDownload.HttpTransferStallCheckPeriod = config.Duration(100 * time.Millisecond) cfg.Storage.ParallelFetchLimit = 10 - if f.config.EnableLegacy { - cfg.Dealmaking.EnableLegacyStorageDeals = true - } for _, o := range opts { o(cfg) diff --git a/itests/graphsync_identity_cid_test.go b/itests/graphsync_identity_cid_test.go index a0e9a80d1..efdbc2bad 100644 --- a/itests/graphsync_identity_cid_test.go +++ b/itests/graphsync_identity_cid_test.go @@ -37,7 +37,6 @@ func TestDealAndRetrievalWithIdentityCID(t *testing.T) { kit.QuietMiningLogs() framework.SetLogLevel() var opts []framework.FrameworkOpts - opts = append(opts, framework.EnableLegacyDeals(true)) opts = append(opts, framework.WithMaxStagingDealsBytes(10000000)) f := framework.NewTestFramework(ctx, t, opts...) err := f.Start() diff --git a/itests/graphsync_retrieval_test.go b/itests/graphsync_retrieval_test.go index 5d3c59506..7ec81cf71 100644 --- a/itests/graphsync_retrieval_test.go +++ b/itests/graphsync_retrieval_test.go @@ -24,7 +24,6 @@ func TestDealRetrieval(t *testing.T) { kit.QuietMiningLogs() framework.SetLogLevel() var opts []framework.FrameworkOpts - opts = append(opts, framework.EnableLegacyDeals(true)) opts = append(opts, framework.WithMaxStagingDealsBytes(10000000)) f := framework.NewTestFramework(ctx, t, opts...) err := f.Start() diff --git a/itests/ipni_publish_test.go b/itests/ipni_publish_test.go index 563c916f9..dfdee16f1 100644 --- a/itests/ipni_publish_test.go +++ b/itests/ipni_publish_test.go @@ -25,7 +25,6 @@ func TestIPNIPublish(t *testing.T) { kit.QuietMiningLogs() framework.SetLogLevel() var opts []framework.FrameworkOpts - opts = append(opts, framework.EnableLegacyDeals(true)) f := framework.NewTestFramework(ctx, t, opts...) 
err := f.Start() require.NoError(t, err) diff --git a/itests/shared/multiminer.go b/itests/shared/multiminer.go index 69842fdef..11dc9bf79 100644 --- a/itests/shared/multiminer.go +++ b/itests/shared/multiminer.go @@ -37,7 +37,7 @@ func RunMultiminerRetrievalTest(t *testing.T, rt func(ctx context.Context, t *te // Set up two miners, each with a separate boost instance connected to it ensemble := kit.NewEnsemble(t) var opts []framework.FrameworkOpts - opts = append(opts, framework.EnableLegacyDeals(true), framework.WithEnsemble(ensemble)) + opts = append(opts, framework.WithEnsemble(ensemble)) boostAndMiner1 := framework.NewTestFramework(ctx, t, opts...) boostAndMiner2 := framework.NewTestFramework(ctx, t, opts...) ensemble.Start() @@ -61,7 +61,7 @@ func RunMultiminerRetrievalTest(t *testing.T, rt func(ctx context.Context, t *te // Set up the second boost instance so that it can read sector data // not only from the second miner, but also from the first miner - cfg.Dealmaking.GraphsyncStorageAccessApiInfo = []string{cfg.SectorIndexApiInfo, miner1ApiInfo} + cfg.Retrievals.GraphsyncRetrievalConfig.GraphsyncStorageAccessApiInfo = []string{cfg.SectorIndexApiInfo, miner1ApiInfo} // Set up some other ports so they don't clash cfg.Graphql.Port = 8081 diff --git a/markets/storageadapter/dealpublisher.go b/markets/storageadapter/dealpublisher.go index 1b09c8391..54aeb952f 100644 --- a/markets/storageadapter/dealpublisher.go +++ b/markets/storageadapter/dealpublisher.go @@ -8,6 +8,7 @@ import ( "time" cborutil "github.com/filecoin-project/go-cbor-util" + "github.com/filecoin-project/go-state-types/exitcode" "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" "go.uber.org/fx" @@ -18,13 +19,10 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/builtin/v9/market" - "github.com/filecoin-project/go-state-types/exitcode" - "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/storage/ctladdr" ) @@ -113,13 +111,13 @@ type PublishMsgConfig struct { } func NewDealPublisher( - feeConfig *config.MinerFeeConfig, + maxPublishDealsFee *types.FIL, publishMsgCfg PublishMsgConfig, ) func(lc fx.Lifecycle, full api.FullNode, as *ctladdr.AddressSelector) *DealPublisher { return func(lc fx.Lifecycle, full api.FullNode, as *ctladdr.AddressSelector) *DealPublisher { maxFee := abi.NewTokenAmount(0) - if feeConfig != nil { - maxFee = abi.TokenAmount(feeConfig.MaxPublishDealsFee) + if maxPublishDealsFee != nil { + maxFee = abi.TokenAmount(*maxPublishDealsFee) } publishSpec := &api.MessageSendSpec{MaxFee: maxFee} dp := newDealPublisher(full, as, publishMsgCfg, publishSpec) diff --git a/node/builder.go b/node/builder.go index 6bc8fc783..97d85c61a 100644 --- a/node/builder.go +++ b/node/builder.go @@ -413,18 +413,6 @@ var BoostNode = Options( ) func ConfigBoost(cfg *config.Boost) Option { - pricingConfig := cfg.Dealmaking.RetrievalPricing - if pricingConfig.Strategy == config.RetrievalPricingExternalMode { - if pricingConfig.External == nil { - return Error(errors.New("retrieval pricing policy has been to set to external but external policy config is nil")) - } - - if pricingConfig.External.Path == "" { - return Error(errors.New("retrieval pricing policy has been to set to external but external script path is 
empty")) - } - } else if pricingConfig.Strategy != config.RetrievalPricingDefaultMode { - return Error(errors.New("retrieval pricing policy must be either default or external")) - } collatWalletStr := cfg.Wallets.DealCollateral if collatWalletStr == "" && cfg.Wallets.PledgeCollateral != "" { // nolint:staticcheck @@ -450,8 +438,6 @@ func ConfigBoost(cfg *config.Boost) Option { return Error(errors.New("HttpDownload.NChunks should be between 1 and 16")) } - legacyFees := cfg.LotusFees.Legacy() - return Options( ConfigCommon(&cfg.Common), @@ -471,7 +457,7 @@ func ConfigBoost(cfg *config.Boost) Option { StorageMiner: walletMiner, CollatWallet: walletDealCollat, PubMsgWallet: walletPSD, - PubMsgBalMin: abi.TokenAmount(cfg.LotusFees.MaxPublishDealsFee), + PubMsgBalMin: abi.TokenAmount(cfg.Dealpublish.MaxPublishDealsFee), })), Override(new(*storagemanager.StorageManager), storagemanager.New(storagemanager.Config{ @@ -517,7 +503,7 @@ func ConfigBoost(cfg *config.Boost) Option { Override(new(dtypes.ProviderTransferNetwork), modules.NewProviderTransferNetwork), Override(StartProviderDataTransferKey, server.NewProviderDataTransfer), Override(new(server.RetrievalAskGetter), server.NewRetrievalAskGetter), - Override(new(*server.GraphsyncUnpaidRetrieval), modules.RetrievalGraphsync(cfg.LotusDealmaking.SimultaneousTransfersForStorage, cfg.LotusDealmaking.SimultaneousTransfersForStoragePerClient, cfg.LotusDealmaking.SimultaneousTransfersForRetrieval)), + Override(new(*server.GraphsyncUnpaidRetrieval), modules.RetrievalGraphsync(cfg.Retrievals.GraphsyncRetrievalConfig.SimultaneousTransfersForRetrieval)), Override(new(dtypes.StagingGraphsync), From(new(*server.GraphsyncUnpaidRetrieval))), Override(StartPieceDoctorKey, modules.NewPieceDoctor), @@ -530,7 +516,7 @@ func ConfigBoost(cfg *config.Boost) Option { // Lotus Markets (retrieval) Override(new(server.SectorAccessor), modules.NewSectorAccessor(cfg)), - Override(HandleRetrievalEventsKey, modules.HandleRetrievalGraphsyncUpdates(time.Duration(cfg.Dealmaking.RetrievalLogDuration), time.Duration(cfg.Dealmaking.StalledRetrievalTimeout))), + Override(HandleRetrievalEventsKey, modules.HandleRetrievalGraphsyncUpdates(time.Duration(cfg.Retrievals.GraphsyncRetrievalConfig.RetrievalLogDuration), time.Duration(cfg.Retrievals.GraphsyncRetrievalConfig.StalledRetrievalTimeout))), Override(HandleRetrievalAskKey, modules.HandleQueryAsk), Override(new(*lp2pimpl.TransportsListener), modules.NewTransportsListener(cfg)), Override(new(*protocolproxy.ProtocolProxy), modules.NewProtocolProxy(cfg)), @@ -553,15 +539,15 @@ func ConfigBoost(cfg *config.Boost) Option { // Boost retrieval deal filter Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(nil)), - If(cfg.Dealmaking.RetrievalFilter != "", - Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(dtypes.RetrievalDealFilter(dealfilter.CliRetrievalDealFilter(cfg.Dealmaking.RetrievalFilter)))), + If(cfg.Retrievals.GraphsyncRetrievalConfig.RetrievalFilter != "", + Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(dtypes.RetrievalDealFilter(dealfilter.CliRetrievalDealFilter(cfg.Retrievals.GraphsyncRetrievalConfig.RetrievalFilter)))), ), - Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(&legacyFees, storageadapter.PublishMsgConfig{ - Period: time.Duration(cfg.LotusDealmaking.PublishMsgPeriod), - MaxDealsPerMsg: cfg.LotusDealmaking.MaxDealsPerPublishMsg, + Override(new(*storageadapter.DealPublisher), 
storageadapter.NewDealPublisher(&cfg.Dealpublish.MaxPublishDealsFee, storageadapter.PublishMsgConfig{ + Period: time.Duration(cfg.Dealpublish.PublishMsgPeriod), + MaxDealsPerMsg: cfg.Dealpublish.MaxDealsPerPublishMsg, StartEpochSealingBuffer: cfg.Dealmaking.StartEpochSealingBuffer, - ManualDealPublish: cfg.Dealmaking.ManualDealPublish, + ManualDealPublish: cfg.Dealpublish.ManualDealPublish, })), Override(new(sealer.Unsealer), From(new(lotus_modules.MinerStorageService))), diff --git a/node/config/def.go b/node/config/def.go index 989315a98..ec91921c7 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -11,14 +11,6 @@ import ( "github.com/ipfs/go-cid" ) -const ( - // RetrievalPricingDefault configures the node to use the default retrieval pricing policy. - RetrievalPricingDefaultMode = "default" - // RetrievalPricingExternal configures the node to use the external retrieval pricing script - // configured by the user. - RetrievalPricingExternalMode = "external" -) - // MaxTraversalLinks configures the maximum number of links to traverse in a DAG while calculating // CommP and traversing a DAG with graphsync; invokes a budget on DAG depth and density. var MaxTraversalLinks uint64 = 32 * (1 << 20) @@ -55,7 +47,6 @@ func defCommon() Common { } -var DefaultDefaultMaxFee = types.MustParseFIL("0.07") var DefaultSimultaneousTransfers = uint64(20) func DefaultBoost() *Boost { @@ -104,91 +95,25 @@ func DefaultBoost() *Boost { }, Dealmaking: DealmakingConfig{ - ConsiderOnlineStorageDeals: true, - ConsiderOfflineStorageDeals: true, - ConsiderOnlineRetrievalDeals: true, - ConsiderOfflineRetrievalDeals: true, - ConsiderVerifiedStorageDeals: true, - ConsiderUnverifiedStorageDeals: true, - PieceCidBlocklist: []cid.Cid{}, - // TODO: It'd be nice to set this based on sector size + ConsiderOnlineStorageDeals: true, + ConsiderOfflineStorageDeals: true, + ConsiderOnlineRetrievalDeals: true, + ConsiderOfflineRetrievalDeals: true, + ConsiderVerifiedStorageDeals: true, + ConsiderUnverifiedStorageDeals: true, + PieceCidBlocklist: []cid.Cid{}, MaxDealStartDelay: Duration(time.Hour * 24 * 14), ExpectedSealDuration: Duration(time.Hour * 24), MaxProviderCollateralMultiplier: 2, - - StartEpochSealingBuffer: 480, // 480 epochs buffer == 4 hours from adding deal to sector to sector being sealed - - DealProposalLogDuration: Duration(time.Hour * 24), - RetrievalLogDuration: Duration(time.Hour * 24), - StalledRetrievalTimeout: Duration(time.Second * 30), - - RetrievalPricing: &lotus_config.RetrievalPricing{ - Strategy: RetrievalPricingDefaultMode, - Default: &lotus_config.RetrievalPricingDefault{ - VerifiedDealsFreeTransfer: true, - }, - External: &lotus_config.RetrievalPricingExternal{ - Path: "", - }, - }, - - // This should no longer be needed once LID is live - BlockstoreCacheMaxShards: 20, // Match default simultaneous retrievals - BlockstoreCacheExpiry: Duration(30 * time.Second), - - IsUnsealedCacheExpiry: Duration(5 * time.Minute), - - MaxTransferDuration: Duration(24 * 3600 * time.Second), - - RemoteCommp: false, - MaxConcurrentLocalCommp: 1, - - HttpTransferMaxConcurrentDownloads: 20, - HttpTransferStallTimeout: Duration(5 * time.Minute), - HttpTransferStallCheckPeriod: Duration(30 * time.Second), - DealLogDurationDays: 30, - SealingPipelineCacheTimeout: Duration(30 * time.Second), - FundsTaggingEnabled: true, - EnableLegacyStorageDeals: false, - ManualDealPublish: false, - BitswapPublicAddresses: []string{}, - }, - - LotusDealmaking: lotus_config.DealmakingConfig{ - ConsiderOnlineStorageDeals: true, - 
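For illustration only (not part of the patch; the helper name is invented and the usual boost/lotus imports are assumed): after this change, everything the deal publisher needs comes from the new Dealpublish section plus the existing Dealmaking.StartEpochSealingBuffer, mirroring the builder.go hunk above.

// publishMsgConfigFromCfg is a hypothetical helper, shown only to summarise
// where the deal-publishing settings live after this patch. cfg is a *config.Boost.
func publishMsgConfigFromCfg(cfg *config.Boost) (*types.FIL, storageadapter.PublishMsgConfig) {
	return &cfg.Dealpublish.MaxPublishDealsFee, storageadapter.PublishMsgConfig{
		Period:                  time.Duration(cfg.Dealpublish.PublishMsgPeriod),
		MaxDealsPerMsg:          cfg.Dealpublish.MaxDealsPerPublishMsg,
		StartEpochSealingBuffer: cfg.Dealmaking.StartEpochSealingBuffer,
		ManualDealPublish:       cfg.Dealpublish.ManualDealPublish,
	}
}

// The two values above are exactly what storageadapter.NewDealPublisher now
// takes in place of the old *config.MinerFeeConfig.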
ConsiderOfflineStorageDeals: true, - ConsiderOnlineRetrievalDeals: true, - ConsiderOfflineRetrievalDeals: true, - ConsiderVerifiedStorageDeals: true, - ConsiderUnverifiedStorageDeals: true, - PieceCidBlocklist: []cid.Cid{}, - // TODO: It'd be nice to set this based on sector size - MaxDealStartDelay: lotus_config.Duration(time.Hour * 24 * 14), - ExpectedSealDuration: lotus_config.Duration(time.Hour * 24), - PublishMsgPeriod: lotus_config.Duration(time.Hour), - MaxDealsPerPublishMsg: 8, - MaxProviderCollateralMultiplier: 2, - - SimultaneousTransfersForStorage: DefaultSimultaneousTransfers, - SimultaneousTransfersForStoragePerClient: 0, - SimultaneousTransfersForRetrieval: DefaultSimultaneousTransfers, - - StartEpochSealingBuffer: 480, // 480 epochs buffer == 4 hours from adding deal to sector to sector being sealed - - RetrievalPricing: &lotus_config.RetrievalPricing{ - Strategy: RetrievalPricingDefaultMode, - Default: &lotus_config.RetrievalPricingDefault{ - VerifiedDealsFreeTransfer: true, - }, - External: &lotus_config.RetrievalPricingExternal{ - Path: "", - }, - }, - }, - - LotusFees: FeeConfig{ - MaxPublishDealsFee: types.MustParseFIL("0.05"), - MaxMarketBalanceAddFee: types.MustParseFIL("0.007"), + StartEpochSealingBuffer: 480, // 480 epochs buffer == 4 hours from adding deal to sector to sector being sealed + DealProposalLogDuration: Duration(time.Hour * 24), + IsUnsealedCacheExpiry: Duration(5 * time.Minute), + MaxTransferDuration: Duration(24 * 3600 * time.Second), + RemoteCommp: false, + MaxConcurrentLocalCommp: 1, + DealLogDurationDays: 30, + SealingPipelineCacheTimeout: Duration(30 * time.Second), + FundsTaggingEnabled: true, }, DAGStore: lotus_config.DAGStoreConfig{ @@ -222,8 +147,31 @@ func DefaultBoost() *Boost { DataTransferPublisher: false, }, HttpDownload: HttpDownloadConfig{ - NChunks: 5, - AllowPrivateIPs: false, + HttpTransferMaxConcurrentDownloads: 20, + HttpTransferStallTimeout: Duration(5 * time.Minute), + HttpTransferStallCheckPeriod: Duration(30 * time.Second), + NChunks: 5, + AllowPrivateIPs: false, + }, + Retrievals: RetrievalConfig{ + GraphsyncRetrievalConfig{ + SimultaneousTransfersForRetrieval: DefaultSimultaneousTransfers, + RetrievalLogDuration: Duration(time.Hour * 24), + StalledRetrievalTimeout: Duration(time.Second * 30), + GraphsyncStorageAccessApiInfo: []string{}, + }, + BitswapRetrievalConfig{ + BitswapPublicAddresses: []string{}, + }, + HTTPRetrievalConfig{ + HTTPRetrievalMultiaddr: "", + }, + }, + Dealpublish: DealPublishConfig{ + ManualDealPublish: false, + PublishMsgPeriod: Duration(time.Hour), + MaxDealsPerPublishMsg: 8, + MaxPublishDealsFee: types.MustParseFIL("0.05"), }, } return cfg diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index f5fc43cd2..03b0a7f3d 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -18,6 +18,44 @@ var Doc = map[string][]DocField{ your node if metadata log is disabled`, }, }, + "BitswapRetrievalConfig": []DocField{ + { + Name: "BitswapPeerID", + Type: "string", + + Comment: `The libp2p peer id used by booster-bitswap. +Run 'booster-bitswap init' to get the peer id. 
+When BitswapPeerID is not empty boostd will: +- listen on bitswap protocols on boostd's own peer id and proxy +requests to booster-bitswap +- advertise boostd's peer id in bitswap records to the content indexer +(bitswap clients connect to boostd, which proxies the requests to +booster-bitswap) +- list bitswap as an available transport on the retrieval transport protocol`, + }, + { + Name: "BitswapPublicAddresses", + Type: "[]string", + + Comment: `Public multiaddresses for booster-bitswap. +If empty +- booster-bitswap is assumed to be running privately +- boostd acts as a proxy: it listens on bitswap protocols on boostd's own +peer id and forwards them to booster-bitswap +If public addresses are set +- boostd announces the booster-bitswap peer id to the indexer as an +extended provider +- clients make connections directly to the booster-bitswap process +(boostd does not act as a proxy)`, + }, + { + Name: "BitswapPrivKeyFile", + Type: "string", + + Comment: `If operating in public mode, in order to announce booster-bitswap as an extended provider, this value must point to a +a file containing the booster-bitswap peer id's private key. Can be left blank when operating with protocol proxy.`, + }, + }, "Boost": []DocField{ { Name: "ConfigVersion", @@ -49,6 +87,12 @@ your node if metadata log is disabled`, Comment: ``, }, + { + Name: "Dealpublish", + Type: "DealPublishConfig", + + Comment: ``, + }, { Name: "Wallets", Type: "WalletsConfig", @@ -92,14 +136,8 @@ your node if metadata log is disabled`, Comment: ``, }, { - Name: "LotusDealmaking", - Type: "lotus_config.DealmakingConfig", - - Comment: `Lotus configs`, - }, - { - Name: "LotusFees", - Type: "FeeConfig", + Name: "Retrievals", + Type: "RetrievalConfig", Comment: ``, }, @@ -162,6 +200,38 @@ your node if metadata log is disabled`, Comment: `From address for eth_ state call`, }, }, + "DealPublishConfig": []DocField{ + { + Name: "ManualDealPublish", + Type: "bool", + + Comment: `When set to true, the user is responsible for publishing deals manually. +The values of MaxDealsPerPublishMsg and PublishMsgPeriod will be +ignored, and deals will remain in the pending state until manually published.`, + }, + { + Name: "PublishMsgPeriod", + Type: "Duration", + + Comment: `When a deal is ready to publish, the amount of time to wait for more +deals to be ready to publish before publishing them all as a batch`, + }, + { + Name: "MaxDealsPerPublishMsg", + Type: "uint64", + + Comment: `The maximum number of deals to include in a single PublishStorageDeals +message`, + }, + { + Name: "MaxPublishDealsFee", + Type: "types.FIL", + + Comment: `The maximum collateral that the provider will put up against a deal, +as a multiplier of the minimum collateral bound +The maximum fee to pay when sending the PublishStorageDeals message`, + }, + }, "DealmakingConfig": []DocField{ { Name: "ConsiderOnlineStorageDeals", @@ -267,20 +337,6 @@ Set this value to 0 to indicate there is no limit per host.`, Comment: `The amount of time to keep deal proposal logs for before cleaning them up.`, }, - { - Name: "RetrievalLogDuration", - Type: "Duration", - - Comment: `The amount of time to keep retrieval deal logs for before cleaning them up. 
-Note RetrievalLogDuration should exceed the StalledRetrievalTimeout as the -logs db is leveraged for pruning stalled retrievals.`, - }, - { - Name: "StalledRetrievalTimeout", - Type: "Duration", - - Comment: `The amount of time stalled retrieval deals will remain open before being canceled.`, - }, { Name: "Filter", Type: "string", @@ -288,32 +344,6 @@ logs db is leveraged for pruning stalled retrievals.`, Comment: `A command used for fine-grained evaluation of storage deals see https://boost.filecoin.io/configuration/deal-filters for more details`, }, - { - Name: "RetrievalFilter", - Type: "string", - - Comment: `A command used for fine-grained evaluation of retrieval deals -see https://boost.filecoin.io/configuration/deal-filters for more details`, - }, - { - Name: "RetrievalPricing", - Type: "*lotus_config.RetrievalPricing", - - Comment: ``, - }, - { - Name: "BlockstoreCacheMaxShards", - Type: "int", - - Comment: `The maximum number of shards cached by the Dagstore for retrieval -Lower this limit if boostd memory is too high during retrievals`, - }, - { - Name: "BlockstoreCacheExpiry", - Type: "Duration", - - Comment: `How long a blockstore shard should be cached before expiring without use`, - }, { Name: "IsUnsealedCacheExpiry", Type: "Duration", @@ -339,70 +369,6 @@ Please note that this only works for v1.2.0 deals and not legacy deals`, Comment: `The maximum number of commp processes to run in parallel on the local boost process`, - }, - { - Name: "HTTPRetrievalMultiaddr", - Type: "string", - - Comment: `The public multi-address for retrieving deals with booster-http. -Note: Must be in multiaddr format, eg /dns/foo.com/tcp/443/https`, - }, - { - Name: "HttpTransferMaxConcurrentDownloads", - Type: "uint64", - - Comment: `The maximum number of concurrent storage deal HTTP downloads. -Note that this is a soft maximum; if some downloads stall, -more downloads are allowed to start.`, - }, - { - Name: "HttpTransferStallCheckPeriod", - Type: "Duration", - - Comment: `The period between checking if downloads have stalled.`, - }, - { - Name: "HttpTransferStallTimeout", - Type: "Duration", - - Comment: `The time that can elapse before a download is considered stalled (and -another concurrent download is allowed to start).`, - }, - { - Name: "BitswapPeerID", - Type: "string", - - Comment: `The libp2p peer id used by booster-bitswap. -Run 'booster-bitswap init' to get the peer id. -When BitswapPeerID is not empty boostd will: -- listen on bitswap protocols on boostd's own peer id and proxy -requests to booster-bitswap -- advertise boostd's peer id in bitswap records to the content indexer -(bitswap clients connect to boostd, which proxies the requests to -booster-bitswap) -- list bitswap as an available transport on the retrieval transport protocol`, - }, - { - Name: "BitswapPublicAddresses", - Type: "[]string", - - Comment: `Public multiaddresses for booster-bitswap. 
-If empty -- booster-bitswap is assumed to be running privately -- boostd acts as a proxy: it listens on bitswap protocols on boostd's own -peer id and forwards them to booster-bitswap -If public addresses are set -- boostd announces the booster-bitswap peer id to the indexer as an -extended provider -- clients make connections directly to the booster-bitswap process -(boostd does not act as a proxy)`, - }, - { - Name: "BitswapPrivKeyFile", - Type: "string", - - Comment: `If operating in public mode, in order to announce booster-bitswap as an extended provider, this value must point to a -a file containing the booster-bitswap peer id's private key. Can be left blank when operating with protocol proxy.`, }, { Name: "DealLogDurationDays", @@ -427,20 +393,41 @@ Any value less than 0 will result in use of default`, accepted boost will tag funds for that deal so that they cannot be used for any other deal.`, }, + }, + "GraphqlConfig": []DocField{ { - Name: "EnableLegacyStorageDeals", - Type: "bool", + Name: "ListenAddress", + Type: "string", - Comment: `Whether to enable legacy deals on the Boost node or not. We recommend keeping -them disabled. These will be completely deprecated soon.`, + Comment: `The ip address the GraphQL server will bind to. Default: 127.0.0.1`, }, { - Name: "ManualDealPublish", - Type: "bool", + Name: "Port", + Type: "uint64", - Comment: `When set to true, the user is responsible for publishing deals manually. -The values of MaxDealsPerPublishMsg and PublishMsgPeriod will be -ignored, and deals will remain in the pending state until manually published.`, + Comment: `The port that the graphql server listens on`, + }, + }, + "GraphsyncRetrievalConfig": []DocField{ + { + Name: "SimultaneousTransfersForRetrieval", + Type: "uint64", + + Comment: `The maximum number of parallel online data transfers for retrieval deals`, + }, + { + Name: "RetrievalLogDuration", + Type: "Duration", + + Comment: `The amount of time to keep retrieval deal logs for before cleaning them up. +Note RetrievalLogDuration should exceed the StalledRetrievalTimeout as the +logs db is leveraged for pruning stalled retrievals.`, + }, + { + Name: "StalledRetrievalTimeout", + Type: "Duration", + + Comment: `The amount of time stalled retrieval deals will remain open before being canceled.`, }, { Name: "GraphsyncStorageAccessApiInfo", @@ -451,36 +438,45 @@ sector data from when serving graphsync retrievals. If this parameter is not set, boost will serve data from the endpoint configured in SectorIndexApiInfo.`, }, - }, - "FeeConfig": []DocField{ { - Name: "MaxPublishDealsFee", - Type: "types.FIL", + Name: "RetrievalFilter", + Type: "string", - Comment: `The maximum fee to pay when sending the PublishStorageDeals message`, + Comment: `A command used for fine-grained evaluation of retrieval deals +see https://boost.filecoin.io/configuration/deal-filters for more details`, }, + }, + "HTTPRetrievalConfig": []DocField{ { - Name: "MaxMarketBalanceAddFee", - Type: "types.FIL", + Name: "HTTPRetrievalMultiaddr", + Type: "string", - Comment: `The maximum fee to pay when sending the AddBalance message (used by legacy markets)`, + Comment: `The public multi-address for retrieving deals with booster-http. +Note: Must be in multiaddr format, eg /dns/foo.com/tcp/443/https`, }, }, - "GraphqlConfig": []DocField{ + "HttpDownloadConfig": []DocField{ { - Name: "ListenAddress", - Type: "string", + Name: "HttpTransferMaxConcurrentDownloads", + Type: "uint64", - Comment: `The ip address the GraphQL server will bind to. 
Default: 127.0.0.1`, + Comment: `The maximum number of concurrent storage deal HTTP downloads. +Note that this is a soft maximum; if some downloads stall, +more downloads are allowed to start.`, }, { - Name: "Port", - Type: "uint64", + Name: "HttpTransferStallCheckPeriod", + Type: "Duration", - Comment: `The port that the graphql server listens on`, + Comment: `The period between checking if downloads have stalled.`, + }, + { + Name: "HttpTransferStallTimeout", + Type: "Duration", + + Comment: `The time that can elapse before a download is considered stalled (and +another concurrent download is allowed to start).`, }, - }, - "HttpDownloadConfig": []DocField{ { Name: "NChunks", Type: "int", @@ -695,105 +691,6 @@ Set this value to "" if the local index directory data service is embedded.`, Comment: `The yugabyte cassandra hosts eg ["127.0.0.1"]`, }, }, - "LotusDealmakingConfig": []DocField{ - { - Name: "PieceCidBlocklist", - Type: "[]cid.Cid", - - Comment: `A list of Data CIDs to reject when making deals`, - }, - { - Name: "ExpectedSealDuration", - Type: "Duration", - - Comment: `Maximum expected amount of time getting the deal into a sealed sector will take -This includes the time the deal will need to get transferred and published -before being assigned to a sector`, - }, - { - Name: "MaxDealStartDelay", - Type: "Duration", - - Comment: `Maximum amount of time proposed deal StartEpoch can be in future`, - }, - { - Name: "PublishMsgPeriod", - Type: "Duration", - - Comment: `When a deal is ready to publish, the amount of time to wait for more -deals to be ready to publish before publishing them all as a batch`, - }, - { - Name: "MaxDealsPerPublishMsg", - Type: "uint64", - - Comment: `The maximum number of deals to include in a single PublishStorageDeals -message`, - }, - { - Name: "MaxProviderCollateralMultiplier", - Type: "uint64", - - Comment: `The maximum collateral that the provider will put up against a deal, -as a multiplier of the minimum collateral bound`, - }, - { - Name: "MaxStagingDealsBytes", - Type: "int64", - - Comment: `The maximum allowed disk usage size in bytes of staging deals not yet -passed to the sealing node by the markets service. 0 is unlimited.`, - }, - { - Name: "SimultaneousTransfersForStorage", - Type: "uint64", - - Comment: `The maximum number of parallel online data transfers for storage deals`, - }, - { - Name: "SimultaneousTransfersForStoragePerClient", - Type: "uint64", - - Comment: `The maximum number of simultaneous data transfers from any single client -for storage deals. -Unset by default (0), and values higher than SimultaneousTransfersForStorage -will have no effect; i.e. 
the total number of simultaneous data transfers -across all storage clients is bound by SimultaneousTransfersForStorage -regardless of this number.`, - }, - { - Name: "SimultaneousTransfersForRetrieval", - Type: "uint64", - - Comment: `The maximum number of parallel online data transfers for retrieval deals`, - }, - { - Name: "StartEpochSealingBuffer", - Type: "uint64", - - Comment: `Minimum start epoch buffer to give time for sealing of sector with deal.`, - }, - { - Name: "Filter", - Type: "string", - - Comment: `A command used for fine-grained evaluation of storage deals -see https://boost.filecoin.io/configuration/deal-filters for more details`, - }, - { - Name: "RetrievalFilter", - Type: "string", - - Comment: `A command used for fine-grained evaluation of retrieval deals -see https://boost.filecoin.io/configuration/deal-filters for more details`, - }, - { - Name: "RetrievalPricing", - Type: "*lotus_config.RetrievalPricing", - - Comment: ``, - }, - }, "MonitoringConfig": []DocField{ { Name: "MpoolAlertEpochs", diff --git a/node/config/types.go b/node/config/types.go index 8585d1105..efddc6f2f 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -41,6 +41,7 @@ type Boost struct { SectorIndexApiInfo string Dealmaking DealmakingConfig + Dealpublish DealPublishConfig Wallets WalletsConfig Graphql GraphqlConfig Monitoring MonitoringConfig @@ -48,20 +49,9 @@ type Boost struct { LocalIndexDirectory LocalIndexDirectoryConfig ContractDeals ContractDealsConfig HttpDownload HttpDownloadConfig - - // Lotus configs - LotusDealmaking lotus_config.DealmakingConfig - LotusFees FeeConfig - DAGStore lotus_config.DAGStoreConfig - IndexProvider IndexProviderConfig -} - -func (b *Boost) GetDealmakingConfig() lotus_config.DealmakingConfig { - return b.LotusDealmaking -} - -func (b *Boost) SetDealmakingConfig(other lotus_config.DealmakingConfig) { - b.LotusDealmaking = other + Retrievals RetrievalConfig + DAGStore lotus_config.DAGStoreConfig + IndexProvider IndexProviderConfig } type WalletsConfig struct { @@ -89,51 +79,6 @@ type TracingConfig struct { Endpoint string } -type LotusDealmakingConfig struct { - // A list of Data CIDs to reject when making deals - PieceCidBlocklist []cid.Cid - // Maximum expected amount of time getting the deal into a sealed sector will take - // This includes the time the deal will need to get transferred and published - // before being assigned to a sector - ExpectedSealDuration Duration - // Maximum amount of time proposed deal StartEpoch can be in future - MaxDealStartDelay Duration - // When a deal is ready to publish, the amount of time to wait for more - // deals to be ready to publish before publishing them all as a batch - PublishMsgPeriod Duration - // The maximum number of deals to include in a single PublishStorageDeals - // message - MaxDealsPerPublishMsg uint64 - // The maximum collateral that the provider will put up against a deal, - // as a multiplier of the minimum collateral bound - MaxProviderCollateralMultiplier uint64 - // The maximum allowed disk usage size in bytes of staging deals not yet - // passed to the sealing node by the markets service. 0 is unlimited. - MaxStagingDealsBytes int64 - // The maximum number of parallel online data transfers for storage deals - SimultaneousTransfersForStorage uint64 - // The maximum number of simultaneous data transfers from any single client - // for storage deals. - // Unset by default (0), and values higher than SimultaneousTransfersForStorage - // will have no effect; i.e. 
the total number of simultaneous data transfers - // across all storage clients is bound by SimultaneousTransfersForStorage - // regardless of this number. - SimultaneousTransfersForStoragePerClient uint64 - // The maximum number of parallel online data transfers for retrieval deals - SimultaneousTransfersForRetrieval uint64 - // Minimum start epoch buffer to give time for sealing of sector with deal. - StartEpochSealingBuffer uint64 - - // A command used for fine-grained evaluation of storage deals - // see https://boost.filecoin.io/configuration/deal-filters for more details - Filter string - // A command used for fine-grained evaluation of retrieval deals - // see https://boost.filecoin.io/configuration/deal-filters for more details - RetrievalFilter string - - RetrievalPricing *lotus_config.RetrievalPricing -} - type DealmakingConfig struct { // When enabled, the miner can accept online deals ConsiderOnlineStorageDeals bool @@ -183,27 +128,10 @@ type DealmakingConfig struct { StartEpochSealingBuffer uint64 // The amount of time to keep deal proposal logs for before cleaning them up. DealProposalLogDuration Duration - // The amount of time to keep retrieval deal logs for before cleaning them up. - // Note RetrievalLogDuration should exceed the StalledRetrievalTimeout as the - // logs db is leveraged for pruning stalled retrievals. - RetrievalLogDuration Duration - // The amount of time stalled retrieval deals will remain open before being canceled. - StalledRetrievalTimeout Duration // A command used for fine-grained evaluation of storage deals // see https://boost.filecoin.io/configuration/deal-filters for more details Filter string - // A command used for fine-grained evaluation of retrieval deals - // see https://boost.filecoin.io/configuration/deal-filters for more details - RetrievalFilter string - - RetrievalPricing *lotus_config.RetrievalPricing - - // The maximum number of shards cached by the Dagstore for retrieval - // Lower this limit if boostd memory is too high during retrievals - BlockstoreCacheMaxShards int - // How long a blockstore shard should be cached before expiring without use - BlockstoreCacheExpiry Duration // How long to cache calls to check whether a sector is unsealed IsUnsealedCacheExpiry Duration @@ -218,47 +146,6 @@ type DealmakingConfig struct { // boost process MaxConcurrentLocalCommp uint64 - // The public multi-address for retrieving deals with booster-http. - // Note: Must be in multiaddr format, eg /dns/foo.com/tcp/443/https - HTTPRetrievalMultiaddr string - - // The maximum number of concurrent storage deal HTTP downloads. - // Note that this is a soft maximum; if some downloads stall, - // more downloads are allowed to start. - HttpTransferMaxConcurrentDownloads uint64 - // The period between checking if downloads have stalled. - HttpTransferStallCheckPeriod Duration - // The time that can elapse before a download is considered stalled (and - // another concurrent download is allowed to start). - HttpTransferStallTimeout Duration - - // The libp2p peer id used by booster-bitswap. - // Run 'booster-bitswap init' to get the peer id. 
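As a sketch only (not part of the patch; the helper name is invented and a *config.Boost value is assumed): callers now reach the bitswap settings through the Retrievals section instead of Dealmaking, as the node/modules/retrieval.go and protocol proxy hunks later in this patch do.

// bitswapIsProxied is a hypothetical helper illustrating the relocated fields.
func bitswapIsProxied(cfg *config.Boost) bool {
	bs := cfg.Retrievals.BitswapRetrievalConfig
	// A configured peer id with no public addresses means booster-bitswap runs
	// privately and boostd proxies bitswap requests to it.
	return bs.BitswapPeerID != "" && len(bs.BitswapPublicAddresses) == 0
}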
- // When BitswapPeerID is not empty boostd will: - // - listen on bitswap protocols on boostd's own peer id and proxy - // requests to booster-bitswap - // - advertise boostd's peer id in bitswap records to the content indexer - // (bitswap clients connect to boostd, which proxies the requests to - // booster-bitswap) - // - list bitswap as an available transport on the retrieval transport protocol - BitswapPeerID string - - // Public multiaddresses for booster-bitswap. - // If empty - // - booster-bitswap is assumed to be running privately - // - boostd acts as a proxy: it listens on bitswap protocols on boostd's own - // peer id and forwards them to booster-bitswap - // If public addresses are set - // - boostd announces the booster-bitswap peer id to the indexer as an - // extended provider - // - clients make connections directly to the booster-bitswap process - // (boostd does not act as a proxy) - BitswapPublicAddresses []string - - // If operating in public mode, in order to announce booster-bitswap as an extended provider, this value must point to a - // a file containing the booster-bitswap peer id's private key. Can be left blank when operating with protocol proxy. - BitswapPrivKeyFile string - // The deal logs older than DealLogDurationDays are deleted from the logsDB // to keep the size of logsDB in check. Set the value as "0" to disable log cleanup DealLogDurationDays int @@ -272,21 +159,6 @@ type DealmakingConfig struct { // accepted boost will tag funds for that deal so that they cannot be used // for any other deal. FundsTaggingEnabled bool - - // Whether to enable legacy deals on the Boost node or not. We recommend keeping - // them disabled. These will be completely deprecated soon. - EnableLegacyStorageDeals bool - - // When set to true, the user is responsible for publishing deals manually. - // The values of MaxDealsPerPublishMsg and PublishMsgPeriod will be - // ignored, and deals will remain in the pending state until manually published. - ManualDealPublish bool - - // The connect strings for the RPC APIs of each miner that boost can read - // sector data from when serving graphsync retrievals. - // If this parameter is not set, boost will serve data from the endpoint - // configured in SectorIndexApiInfo. - GraphsyncStorageAccessApiInfo []string } type ContractDealsConfig struct { @@ -369,20 +241,6 @@ type IndexProviderHttpPublisherConfig struct { WithLibp2p bool } -type FeeConfig struct { - // The maximum fee to pay when sending the PublishStorageDeals message - MaxPublishDealsFee types.FIL - // The maximum fee to pay when sending the AddBalance message (used by legacy markets) - MaxMarketBalanceAddFee types.FIL -} - -func (c *FeeConfig) Legacy() lotus_config.MinerFeeConfig { - return lotus_config.MinerFeeConfig{ - MaxPublishDealsFee: c.MaxPublishDealsFee, - MaxMarketBalanceAddFee: c.MaxMarketBalanceAddFee, - } -} - type StorageConfig struct { // The maximum number of concurrent fetch operations to the storage subsystem ParallelFetchLimit int @@ -437,6 +295,15 @@ type LocalIndexDirectoryLeveldbConfig struct { } type HttpDownloadConfig struct { + // The maximum number of concurrent storage deal HTTP downloads. + // Note that this is a soft maximum; if some downloads stall, + // more downloads are allowed to start. + HttpTransferMaxConcurrentDownloads uint64 + // The period between checking if downloads have stalled. 
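A minimal sketch (not part of the patch; the helper name is invented and the usual imports are assumed): the HTTP transfer limiter now reads these values from the HttpDownload section rather than Dealmaking, matching the node/modules/storageminer.go hunk later in this patch.

// transferLimiterFromCfg is a hypothetical helper showing where the limiter
// settings live after this patch. cfg is a *config.Boost.
func transferLimiterFromCfg(cfg *config.Boost) storagemarket.TransferLimiterConfig {
	return storagemarket.TransferLimiterConfig{
		MaxConcurrent:    cfg.HttpDownload.HttpTransferMaxConcurrentDownloads,
		StallCheckPeriod: time.Duration(cfg.HttpDownload.HttpTransferStallCheckPeriod),
		StallTimeout:     time.Duration(cfg.HttpDownload.HttpTransferStallTimeout),
	}
}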
+ HttpTransferStallCheckPeriod Duration + // The time that can elapse before a download is considered stalled (and + // another concurrent download is allowed to start). + HttpTransferStallTimeout Duration // NChunks is a number of chunks to split HTTP downloads into. Each chunk is downloaded in the goroutine of its own // which improves the overall download speed. NChunks is always equal to 1 for libp2p transport because libp2p server // doesn't support range requests yet. NChunks must be greater than 0 and less than 16, with the default of 5. @@ -445,3 +312,81 @@ type HttpDownloadConfig struct { // The default is false. AllowPrivateIPs bool } + +type RetrievalConfig struct { + GraphsyncRetrievalConfig + BitswapRetrievalConfig + HTTPRetrievalConfig +} + +type BitswapRetrievalConfig struct { + // The libp2p peer id used by booster-bitswap. + // Run 'booster-bitswap init' to get the peer id. + // When BitswapPeerID is not empty boostd will: + // - listen on bitswap protocols on boostd's own peer id and proxy + // requests to booster-bitswap + // - advertise boostd's peer id in bitswap records to the content indexer + // (bitswap clients connect to boostd, which proxies the requests to + // booster-bitswap) + // - list bitswap as an available transport on the retrieval transport protocol + BitswapPeerID string + + // Public multiaddresses for booster-bitswap. + // If empty + // - booster-bitswap is assumed to be running privately + // - boostd acts as a proxy: it listens on bitswap protocols on boostd's own + // peer id and forwards them to booster-bitswap + // If public addresses are set + // - boostd announces the booster-bitswap peer id to the indexer as an + // extended provider + // - clients make connections directly to the booster-bitswap process + // (boostd does not act as a proxy) + BitswapPublicAddresses []string + + // If operating in public mode, in order to announce booster-bitswap as an extended provider, this value must point to a + // a file containing the booster-bitswap peer id's private key. Can be left blank when operating with protocol proxy. + BitswapPrivKeyFile string +} + +type HTTPRetrievalConfig struct { + // The public multi-address for retrieving deals with booster-http. + // Note: Must be in multiaddr format, eg /dns/foo.com/tcp/443/https + HTTPRetrievalMultiaddr string +} + +type GraphsyncRetrievalConfig struct { + // The maximum number of parallel online data transfers for retrieval deals + SimultaneousTransfersForRetrieval uint64 + // The amount of time to keep retrieval deal logs for before cleaning them up. + // Note RetrievalLogDuration should exceed the StalledRetrievalTimeout as the + // logs db is leveraged for pruning stalled retrievals. + RetrievalLogDuration Duration + // The amount of time stalled retrieval deals will remain open before being canceled. + StalledRetrievalTimeout Duration + // The connect strings for the RPC APIs of each miner that boost can read + // sector data from when serving graphsync retrievals. + // If this parameter is not set, boost will serve data from the endpoint + // configured in SectorIndexApiInfo. + GraphsyncStorageAccessApiInfo []string + // A command used for fine-grained evaluation of retrieval deals + // see https://boost.filecoin.io/configuration/deal-filters for more details + RetrievalFilter string +} + +type DealPublishConfig struct { + // When set to true, the user is responsible for publishing deals manually. 
+ // The values of MaxDealsPerPublishMsg and PublishMsgPeriod will be + // ignored, and deals will remain in the pending state until manually published. + ManualDealPublish bool + + // When a deal is ready to publish, the amount of time to wait for more + // deals to be ready to publish before publishing them all as a batch + PublishMsgPeriod Duration + // The maximum number of deals to include in a single PublishStorageDeals + // message + MaxDealsPerPublishMsg uint64 + // The maximum collateral that the provider will put up against a deal, + // as a multiplier of the minimum collateral bound + // The maximum fee to pay when sending the PublishStorageDeals message + MaxPublishDealsFee types.FIL +} diff --git a/node/modules/graphsync.go b/node/modules/graphsync.go index a63bd1d71..5785dc9f4 100644 --- a/node/modules/graphsync.go +++ b/node/modules/graphsync.go @@ -24,13 +24,13 @@ import ( ) // RetrievalGraphsync creates a graphsync instance used to serve retrievals. -func RetrievalGraphsync(parallelTransfersForStorage uint64, parallelTransfersForStoragePerPeer uint64, parallelTransfersForRetrieval uint64) func(mctx lotus_helpers.MetricsCtx, lc fx.Lifecycle, pid *piecedirectory.PieceDirectory, h host.Host, net dtypes.ProviderTransferNetwork, dealDecider dtypes.RetrievalDealFilter, sa *lib.MultiMinerAccessor, askGetter server.RetrievalAskGetter) (*server.GraphsyncUnpaidRetrieval, error) { +func RetrievalGraphsync(parallelTransfersForRetrieval uint64) func(mctx lotus_helpers.MetricsCtx, lc fx.Lifecycle, pid *piecedirectory.PieceDirectory, h host.Host, net dtypes.ProviderTransferNetwork, dealDecider dtypes.RetrievalDealFilter, sa *lib.MultiMinerAccessor, askGetter server.RetrievalAskGetter) (*server.GraphsyncUnpaidRetrieval, error) { return func(mctx lotus_helpers.MetricsCtx, lc fx.Lifecycle, pid *piecedirectory.PieceDirectory, h host.Host, net dtypes.ProviderTransferNetwork, dealDecider dtypes.RetrievalDealFilter, sa *lib.MultiMinerAccessor, askGetter server.RetrievalAskGetter) (*server.GraphsyncUnpaidRetrieval, error) { // Graphsync tracks metrics separately, pass nil blockMetrics to the remote blockstore rb := remoteblockstore.NewRemoteBlockstore(pid, nil) // Create a Graphsync instance - mkgs := Graphsync(parallelTransfersForStorage, parallelTransfersForStoragePerPeer, parallelTransfersForRetrieval) + mkgs := Graphsync(parallelTransfersForRetrieval) gs := mkgs(mctx, lc, rb, pid, h) // Wrap the Graphsync instance with a handler for unpaid retrieval requests @@ -62,7 +62,7 @@ func RetrievalGraphsync(parallelTransfersForStorage uint64, parallelTransfersFor } } -func Graphsync(parallelTransfersForStorage uint64, parallelTransfersForStoragePerPeer uint64, parallelTransfersForRetrieval uint64) func(mctx lotus_helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, pid *piecedirectory.PieceDirectory, h host.Host) dtypes.StagingGraphsync { +func Graphsync(parallelTransfersForRetrieval uint64) func(mctx lotus_helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, pid *piecedirectory.PieceDirectory, h host.Host) dtypes.StagingGraphsync { return func(mctx lotus_helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, pid *piecedirectory.PieceDirectory, h host.Host) dtypes.StagingGraphsync { graphsyncNetwork := gsnet.NewFromLibp2pHost(h) lsys := storeutil.LinkSystemForBlockstore(ibs) @@ -70,9 +70,7 @@ func Graphsync(parallelTransfersForStorage uint64, parallelTransfersForStoragePe graphsyncNetwork, lsys, graphsync.RejectAllRequestsByDefault(), - 
graphsync.MaxInProgressIncomingRequests(parallelTransfersForRetrieval), - graphsync.MaxInProgressIncomingRequestsPerPeer(parallelTransfersForStoragePerPeer), - graphsync.MaxInProgressOutgoingRequests(parallelTransfersForStorage), + graphsync.MaxInProgressOutgoingRequests(parallelTransfersForRetrieval), graphsync.MaxLinksPerIncomingRequests(config.MaxTraversalLinks), graphsync.MaxLinksPerOutgoingRequests(config.MaxTraversalLinks)) diff --git a/node/modules/piecedirectory.go b/node/modules/piecedirectory.go index a036e2c27..5218004ea 100644 --- a/node/modules/piecedirectory.go +++ b/node/modules/piecedirectory.go @@ -107,7 +107,7 @@ func NewMultiminerSectorAccessor(cfg *config.Boost) func(full v1api.FullNode) *l return func(full v1api.FullNode) *lib.MultiMinerAccessor { // Get the endpoints of all the miners that this boost node can query // for retrieval data when serving graphsync retrievals - storageApiInfos := cfg.Dealmaking.GraphsyncStorageAccessApiInfo + storageApiInfos := cfg.Retrievals.GraphsyncRetrievalConfig.GraphsyncStorageAccessApiInfo if len(storageApiInfos) == 0 { // If the endpoints aren't explicitly configured, fall back to just // serving retrieval data from the same endpoint where data is stored to diff --git a/node/modules/retrieval.go b/node/modules/retrieval.go index d6d7773f8..32f1e2742 100644 --- a/node/modules/retrieval.go +++ b/node/modules/retrieval.go @@ -27,7 +27,7 @@ import ( // based off the host and boost config func bitswapMultiAddrs(cfg *config.Boost, h host.Host) ([]multiaddr.Multiaddr, error) { // if BitswapPublicAddresses is empty, that means we'll be serving bitswap directly from this host, so just return host multiaddresses - if len(cfg.Dealmaking.BitswapPublicAddresses) == 0 { + if len(cfg.Retrievals.BitswapRetrievalConfig.BitswapPublicAddresses) == 0 { maddr, err := peer.AddrInfoToP2pAddrs(&peer.AddrInfo{ ID: h.ID(), Addrs: h.Addrs(), @@ -43,7 +43,7 @@ func bitswapMultiAddrs(cfg *config.Boost, h host.Host) ([]multiaddr.Multiaddr, e // parse all of the public multiaddrs var addrs []multiaddr.Multiaddr - for _, addrString := range cfg.Dealmaking.BitswapPublicAddresses { + for _, addrString := range cfg.Retrievals.BitswapRetrievalConfig.BitswapPublicAddresses { addr, err := multiaddr.NewMultiaddr(addrString) if err != nil { return nil, fmt.Errorf("Could not parse bitswap address '%s' as multiaddr: %w", addrString, err) @@ -52,9 +52,9 @@ func bitswapMultiAddrs(cfg *config.Boost, h host.Host) ([]multiaddr.Multiaddr, e } // in order to make these multiaddrs fully dialable, we encapsulate the bitswap peer id inside of them - bsPeerID, err := peer.Decode(cfg.Dealmaking.BitswapPeerID) + bsPeerID, err := peer.Decode(cfg.Retrievals.BitswapRetrievalConfig.BitswapPeerID) if err != nil { - return nil, fmt.Errorf("Could not parse bitswap peer id '%s': %w", cfg.Dealmaking.BitswapPeerID, err) + return nil, fmt.Errorf("Could not parse bitswap peer id '%s': %w", cfg.Retrievals.BitswapRetrievalConfig.BitswapPeerID, err) } return peer.AddrInfoToP2pAddrs(&peer.AddrInfo{ ID: bsPeerID, @@ -83,12 +83,12 @@ func NewTransportsListener(cfg *config.Boost) func(h host.Host) (*lp2pimpl.Trans // If there's an http retrieval address specified, add HTTP to the list // of supported protocols - if cfg.Dealmaking.HTTPRetrievalMultiaddr != "" { - maddr, err := multiaddr.NewMultiaddr(cfg.Dealmaking.HTTPRetrievalMultiaddr) + if cfg.Retrievals.HTTPRetrievalConfig.HTTPRetrievalMultiaddr != "" { + maddr, err := multiaddr.NewMultiaddr(cfg.Retrievals.HTTPRetrievalConfig.HTTPRetrievalMultiaddr) if 
err != nil { msg := "HTTPRetrievalURL must be in multi-address format. " msg += "Could not parse '%s' as multiaddr: %w" - return nil, fmt.Errorf(msg, cfg.Dealmaking.HTTPRetrievalMultiaddr, err) + return nil, fmt.Errorf(msg, cfg.Retrievals.HTTPRetrievalConfig.HTTPRetrievalMultiaddr, err) } protos = append(protos, types.Protocol{ Name: "http", @@ -98,7 +98,7 @@ func NewTransportsListener(cfg *config.Boost) func(h host.Host) (*lp2pimpl.Trans // If there's a bitswap peer address specified, add bitswap to the list // of supported protocols - if cfg.Dealmaking.BitswapPeerID != "" { + if cfg.Retrievals.BitswapRetrievalConfig.BitswapPeerID != "" { addrs, err := bitswapMultiAddrs(cfg, h) if err != nil { return nil, err @@ -183,8 +183,8 @@ func NewProtocolProxy(cfg *config.Boost) func(h host.Host) (*protocolproxy.Proto return func(h host.Host) (*protocolproxy.ProtocolProxy, error) { peerConfig := map[peer.ID][]protocol.ID{} // add bitswap if a peer id is set AND the peer is only private - if cfg.Dealmaking.BitswapPeerID != "" && len(cfg.Dealmaking.BitswapPublicAddresses) == 0 { - bsPeerID, err := peer.Decode(cfg.Dealmaking.BitswapPeerID) + if cfg.Retrievals.BitswapRetrievalConfig.BitswapPeerID != "" && len(cfg.Retrievals.BitswapRetrievalConfig.BitswapPublicAddresses) == 0 { + bsPeerID, err := peer.Decode(cfg.Retrievals.BitswapRetrievalConfig.BitswapPeerID) if err != nil { return nil, err } diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index be545bbe7..bdab69cbd 100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -322,7 +322,7 @@ func NewSectorStateDB(sqldb *sql.DB) *db.SectorStateDB { func HandleBoostLibp2pDeals(cfg *config.Boost) func(lc fx.Lifecycle, h host.Host, prov *storagemarket.Provider, a v1api.FullNode, idxProv *indexprovider.Wrapper, plDB *db.ProposalLogsDB, spApi sealingpipeline.API) { return func(lc fx.Lifecycle, h host.Host, prov *storagemarket.Provider, a v1api.FullNode, idxProv *indexprovider.Wrapper, plDB *db.ProposalLogsDB, spApi sealingpipeline.API) { - lp2pnet := lp2pimpl.NewDealProvider(h, prov, a, plDB, spApi, cfg.Dealmaking.EnableLegacyStorageDeals) + lp2pnet := lp2pimpl.NewDealProvider(h, prov, a, plDB, spApi) lc.Append(fx.Hook{ OnStart: func(ctx context.Context) error { @@ -476,9 +476,9 @@ func NewStorageMarketProvider(provAddr address.Address, cfg *config.Boost) func( MaxTransferDuration: time.Duration(cfg.Dealmaking.MaxTransferDuration), RemoteCommp: cfg.Dealmaking.RemoteCommp, TransferLimiter: storagemarket.TransferLimiterConfig{ - MaxConcurrent: cfg.Dealmaking.HttpTransferMaxConcurrentDownloads, - StallCheckPeriod: time.Duration(cfg.Dealmaking.HttpTransferStallCheckPeriod), - StallTimeout: time.Duration(cfg.Dealmaking.HttpTransferStallTimeout), + MaxConcurrent: cfg.HttpDownload.HttpTransferMaxConcurrentDownloads, + StallCheckPeriod: time.Duration(cfg.HttpDownload.HttpTransferStallCheckPeriod), + StallTimeout: time.Duration(cfg.HttpDownload.HttpTransferStallTimeout), }, DealLogDurationDays: cfg.Dealmaking.DealLogDurationDays, StorageFilter: cfg.Dealmaking.Filter, diff --git a/react/src/StorageSpace.js b/react/src/StorageSpace.js index de13fa98e..e8ad7ec31 100644 --- a/react/src/StorageSpace.js +++ b/react/src/StorageSpace.js @@ -1,5 +1,5 @@ import {useQuery} from "@apollo/react-hooks"; -import {LegacyStorageQuery, StorageQuery} from "./gql"; +import {StorageQuery} from "./gql"; import React from "react"; import {addCommas, humanFileSize} from "./util"; import './StorageSpace.css' @@ -12,7 +12,6 @@ import {Info} from 
"./Info" export function StorageSpacePage(props) { return - } @@ -81,61 +80,6 @@ function StorageSpaceContent(props) { } -function LegacyStorageSpaceContent(props) { - const {loading, error, data} = useQuery(LegacyStorageQuery, { pollInterval: 10000 }) - - if (loading) { - return
<div>Loading...</div>
- } - if (error) { - return
<div>Error: {error.message}</div>
- } - - var storage = data.legacyStorage - if (storage.Capacity === 0n) { - return null - } - - const bars = [{ - name: 'Used', - className: 'used', - amount: storage.Used, - }, { - name: 'Free', - className: 'free', - amount: storage.Capacity - storage.Used, - }] - - return
-

Legacy Deal transfers

- -
- - -
- - - - {bars.map(bar => ( - - - - - ))} - - - - - -
- {bar.name} - {humanFileSize(bar.amount)} ({addCommas(bar.amount)} bytes)
- Mount Point - The path to the directory where downloaded data is kept until the deal is added to a sector - {storage.MountPoint}
-
-} - export function StorageSpaceMenuItem(props) { const {data} = useQuery(StorageQuery, { pollInterval: 10000, diff --git a/react/src/gql.js b/react/src/gql.js index 4d5c3b879..62361d78a 100644 --- a/react/src/gql.js +++ b/react/src/gql.js @@ -646,16 +646,6 @@ const StorageQuery = gql` } `; -const LegacyStorageQuery = gql` - query AppLegacyStorageQuery { - legacyStorage { - Capacity - Used - MountPoint - } - } -`; - const SealingPipelineQuery = gql` query AppSealingPipelineQuery { sealingpipeline { @@ -924,7 +914,6 @@ export { FlaggedPiecesCountQuery, LIDQuery, StorageQuery, - LegacyStorageQuery, FundsQuery, FundsLogsQuery, DealPublishQuery, diff --git a/storagemarket/lp2pimpl/net.go b/storagemarket/lp2pimpl/net.go index 3081c2ff4..0244aff7b 100644 --- a/storagemarket/lp2pimpl/net.go +++ b/storagemarket/lp2pimpl/net.go @@ -165,23 +165,21 @@ func NewDealClient(h host.Host, addr address.Address, walletApi api.Wallet, opti // DealProvider listens for incoming deal proposals over libp2p type DealProvider struct { - ctx context.Context - host host.Host - prov *storagemarket.Provider - fullNode v1api.FullNode - plDB *db.ProposalLogsDB - spApi sealingpipeline.API - enableLegacyDeals bool + ctx context.Context + host host.Host + prov *storagemarket.Provider + fullNode v1api.FullNode + plDB *db.ProposalLogsDB + spApi sealingpipeline.API } -func NewDealProvider(h host.Host, prov *storagemarket.Provider, fullNodeApi v1api.FullNode, plDB *db.ProposalLogsDB, spApi sealingpipeline.API, enableLegacyDeals bool) *DealProvider { +func NewDealProvider(h host.Host, prov *storagemarket.Provider, fullNodeApi v1api.FullNode, plDB *db.ProposalLogsDB, spApi sealingpipeline.API) *DealProvider { p := &DealProvider{ - host: h, - prov: prov, - fullNode: fullNodeApi, - plDB: plDB, - spApi: spApi, - enableLegacyDeals: enableLegacyDeals, + host: h, + prov: prov, + fullNode: fullNodeApi, + plDB: plDB, + spApi: spApi, } return p } From 08dd80299efc11a8c76ca7697b8ec8e955caed9c Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Tue, 19 Dec 2023 23:31:07 +0400 Subject: [PATCH 27/34] remove graphsync unit tests --- .../server/gsunpaidretrieval_test.go | 423 ------------------ 1 file changed, 423 deletions(-) delete mode 100644 retrievalmarket/server/gsunpaidretrieval_test.go diff --git a/retrievalmarket/server/gsunpaidretrieval_test.go b/retrievalmarket/server/gsunpaidretrieval_test.go deleted file mode 100644 index ee4f9572c..000000000 --- a/retrievalmarket/server/gsunpaidretrieval_test.go +++ /dev/null @@ -1,423 +0,0 @@ -package server - -import ( - "context" - "errors" - "fmt" - "io" - "os" - "strings" - "testing" - "time" - - "github.com/dustin/go-humanize" - graphsyncimpl "github.com/filecoin-project/boost-graphsync/impl" - gsnet "github.com/filecoin-project/boost-graphsync/network" - "github.com/filecoin-project/boost-graphsync/storeutil" - clinode "github.com/filecoin-project/boost/cli/node" - "github.com/filecoin-project/boost/datatransfer" - dtgstransport "github.com/filecoin-project/boost/datatransfer/transport/graphsync" - bdclientutil "github.com/filecoin-project/boost/extern/boostd-data/clientutil" - "github.com/filecoin-project/boost/extern/boostd-data/model" - "github.com/filecoin-project/boost/markets/utils" - "github.com/filecoin-project/boost/piecedirectory" - gsclient "github.com/filecoin-project/boost/retrievalmarket/client" - "github.com/filecoin-project/boost/retrievalmarket/testutil" - "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" - "github.com/filecoin-project/go-address" - 
"github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/api" - lotusmocks "github.com/filecoin-project/lotus/api/mocks" - test "github.com/filecoin-project/lotus/chain/events/state/mock" - "github.com/golang/mock/gomock" - "github.com/google/uuid" - "github.com/ipfs/boxo/blockservice" - "github.com/ipfs/boxo/exchange/offline" - "github.com/ipfs/boxo/ipld/merkledag" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" - blocksutil "github.com/ipfs/go-ipfs-blocksutil" - logging "github.com/ipfs/go-log/v2" - "github.com/ipld/go-car/v2" - "github.com/ipld/go-ipld-prime" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - basicnode "github.com/ipld/go-ipld-prime/node/basic" - "github.com/ipld/go-ipld-prime/traversal" - "github.com/ipld/go-ipld-prime/traversal/selector" - "github.com/ipld/go-ipld-prime/traversal/selector/builder" - "github.com/stretchr/testify/require" - "golang.org/x/term" -) - -var tlog = logging.Logger("testgs") - -type testCase struct { - name string - reqPayloadCid cid.Cid - watch func(client *gsclient.Client, gsupr *GraphsyncUnpaidRetrieval) - ask *legacyretrievaltypes.Ask - noUnsealedCopy bool - useCarV2 bool - expectErr bool - expectClientCancelEvent bool - expectProviderCancelEvent bool - expectRejection string -} - -var providerCancelled = errors.New("provider cancelled") - -//var clientCancelled = errors.New("client cancelled") -//var clientRejected = errors.New("client received reject response") - -func TestGS(t *testing.T) { - t.Skip("refactor tests to use boost client") - //_ = logging.SetLogLevel("testgs", "debug") - _ = logging.SetLogLevel("testgs", "info") - _ = logging.SetLogLevel("dt-impl", "debug") - - missingCid := cid.MustParse("baguqeeraaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") - - testCases := []testCase{{ - name: "happy path", - }, { - name: "happy path w/ carv2", - useCarV2: true, - }, { - name: "request missing payload cid", - reqPayloadCid: missingCid, - expectErr: true, - }, { - name: "request for piece with no unsealed sectors", - noUnsealedCopy: true, - expectErr: true, - expectRejection: "no unsealed piece", - }, { - name: "request for non-zero price per byte", - ask: &legacyretrievaltypes.Ask{ - UnsealPrice: abi.NewTokenAmount(0), - PricePerByte: abi.NewTokenAmount(1), - }, - expectErr: true, - expectRejection: "ask price is non-zero", - }, { - // Note: we disregard the unseal price because we only serve deals - // with an unsealed piece, so the unseal price is irrelevant. - // Therefore the retrieval should succeed for non-zero unseal price. 
- name: "request for non-zero unseal price", - ask: &legacyretrievaltypes.Ask{ - UnsealPrice: abi.NewTokenAmount(1), - PricePerByte: abi.NewTokenAmount(0), - }, - }, { - name: "provider cancel request after sending 2 blocks", - watch: func(client *gsclient.Client, gsupr *GraphsyncUnpaidRetrieval) { - count := 0 - gsupr.outgoingBlockHook = func(state *retrievalState) { - count++ - if count == 2 { - tlog.Debug("provider cancelling client deal") - err := gsupr.CancelTransfer(context.TODO(), state.cs.transferID, &state.cs.recipient) - require.NoError(t, err) - } - if count == 10 { - tlog.Warn("sending last block but client cancel hasn't arrived yet") - } - } - }, - expectErr: true, - expectClientCancelEvent: true, - }, { - name: "provider cancel request after sending 2 blocks without peer id", - watch: func(client *gsclient.Client, gsupr *GraphsyncUnpaidRetrieval) { - count := 0 - gsupr.outgoingBlockHook = func(state *retrievalState) { - count++ - if count == 2 { - tlog.Debug("provider cancelling client deal") - err := gsupr.CancelTransfer(context.TODO(), state.cs.transferID, nil) - require.NoError(t, err) - } - if count == 10 { - tlog.Warn("sending last block but client cancel hasn't arrived yet") - } - } - }, - expectErr: true, - expectClientCancelEvent: true, - }} - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - runRequestTest(t, tc) - }) - } -} - -func runRequestTest(t *testing.T, tc testCase) { - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) - defer cancel() - - // Create a CAR file and set up mocks - testData := testutil.NewLibp2pTestData(ctx, t) - - // Create a random CAR file - carRootCid, carFilePath := piecedirectory.CreateCarFile(t) - - var sectionReader car.SectionReader - - if tc.useCarV2 { - var err error - sectionReader, err = os.Open(carFilePath) - require.NoError(t, err) - defer sectionReader.(*os.File).Close() - } else { - carReader, err := car.OpenReader(carFilePath) - require.NoError(t, err) - defer carReader.Close() - sectionReader, err = carReader.DataReader() - require.NoError(t, err) - - } - - // Any calls to get a reader over data should return a reader over the random CAR file - pr := piecedirectory.CreateMockPieceReader(t, sectionReader) - - carv1Bytes, err := io.ReadAll(sectionReader) - require.NoError(t, err) - carSize := len(carv1Bytes) - - maddr := address.TestAddress - pieceCid := GenerateCids(1)[0] - sectorID := abi.SectorNumber(1) - offset := abi.PaddedPieceSize(0) - dealInfo := model.DealInfo{ - DealUuid: uuid.New().String(), - ChainDealID: abi.DealID(1), - MinerAddr: maddr, - SectorID: sectorID, - PieceOffset: offset, - PieceLength: abi.UnpaddedPieceSize(carSize).Padded(), - } - - cl := bdclientutil.NewTestStore(ctx) - defer cl.Close(ctx) - - pd := piecedirectory.NewPieceDirectory(cl, pr, 1) - pd.Start(ctx) - err = pd.AddDealForPiece(ctx, pieceCid, dealInfo) - require.NoError(t, err) - - sa := &mockSectorAccessor{ - unsealed: !tc.noUnsealedCopy, - } - vdeps := ValidationDeps{ - PieceDirectory: pd, - SectorAccessor: sa, - AskStore: NewRetrievalAskGetter(), - } - - // Create a blockstore over the CAR file blocks - carDataBs, err := pd.GetBlockstore(ctx, pieceCid) - require.NoError(t, err) - - // Wrap graphsync with the graphsync unpaid retrieval interceptor - linkSystem2 := storeutil.LinkSystemForBlockstore(carDataBs) - gs2 := graphsyncimpl.New(ctx, gsnet.NewFromLibp2pHost(testData.Host2), linkSystem2) - gsupr, err := NewGraphsyncUnpaidRetrieval(testData.Host2.ID(), gs2, testData.DTNet2, vdeps) - 
require.NoError(t, err) - - ctrl := gomock.NewController(t) - fn := lotusmocks.NewMockFullNode(ctrl) - peerID := testData.Host2.ID() - var maddrs []abi.Multiaddrs - for _, mma := range testData.Host2.Addrs() { - maddrs = append(maddrs, mma.Bytes()) - } - minfo := api.MinerInfo{ - PeerId: &peerID, - Multiaddrs: maddrs, - Worker: address.TestAddress2, - } - fn.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(minfo, nil).AnyTimes() - chainHead, err := test.MockTipset(maddr, 1) - require.NoError(t, err) - fn.EXPECT().ChainHead(gomock.Any()).Return(chainHead, nil).AnyTimes() - - queryHandler := NewQueryAskHandler(testData.Host2, maddr, pd, sa, NewRetrievalAskGetter(), fn) - queryHandler.Start() - defer queryHandler.Stop() - - // Create a Graphsync transport and call SetEventHandler, which registers - // listeners for all the Graphsync hooks. - gsTransport := dtgstransport.NewTransport(testData.Host2.ID(), gsupr) - err = gsTransport.SetEventHandler(nil) - require.NoError(t, err) - - gsupr.SubscribeToDataTransferEvents(func(event datatransfer.Event, channelState datatransfer.ChannelState) { - tlog.Debugf("prov dt: %s %s / %s", datatransfer.Events[event.Code], event.Message, datatransfer.Statuses[channelState.Status()]) - }) - err = gsupr.Start(ctx) - require.NoError(t, err) - - client := newTestClient(t, testData, fn) - - if tc.watch != nil { - tc.watch(client, gsupr) - } - - // Watch for provider completion - providerResChan := make(chan error) - gsupr.SubscribeToMarketsEvents(func(event legacyretrievaltypes.ProviderEvent, state legacyretrievaltypes.ProviderDealState) { - tlog.Debugf("prov mkt: %s %s %s", legacyretrievaltypes.ProviderEvents[event], state.Status.String(), state.Message) - switch event { - case legacyretrievaltypes.ProviderEventComplete: - providerResChan <- nil - case legacyretrievaltypes.ProviderEventCancelComplete: - providerResChan <- providerCancelled - case legacyretrievaltypes.ProviderEventDataTransferError: - providerResChan <- errors.New(state.Message) - } - }) - - // Retrieve the data - tlog.Infof("Retrieve cid %s from peer %s", carRootCid, client.ClientAddr.String()) - // Use an explore-all but add unixfs-preload to make sure we have UnixFS - // ADL support wired up. 
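	// The selector built below walks every node reachable from the root
	// (ExploreRecursive with no depth limit over ExploreAll), and interpreting the
	// root through the "unixfs-preload" ADL makes the traversal pull in the leaf
	// blocks of sharded UnixFS files and directories as they are visited.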
- ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) - sel := ssb.ExploreInterpretAs("unixfs-preload", ssb.ExploreRecursive( - selector.RecursionLimitNone(), - ssb.ExploreAll(ssb.ExploreRecursiveEdge()), - )).Node() - - query, err := client.RetrievalQuery(ctx, maddr, pieceCid) - require.NoError(t, err) - - proposal, err := gsclient.RetrievalProposalForAsk(query, carRootCid, sel) - require.NoError(t, err) - - // Retrieve the data - _, err = client.RetrieveContentWithProgressCallback( - ctx, - maddr, - proposal, - func(bytesReceived_ uint64) { - printProgress(bytesReceived_) - }, - ) - require.NoError(t, err) - - dservOffline := merkledag.NewDAGService(blockservice.New(testData.Bs1, offline.Exchange(testData.Bs1))) - - // if we used a selector - need to find the sub-root the user actually wanted to retrieve - if sel != nil { - var subRootFound bool - err = utils.TraverseDag( - ctx, - dservOffline, - carRootCid, - sel, - func(p traversal.Progress, n ipld.Node, r traversal.VisitReason) error { - if r == traversal.VisitReason_SelectionMatch { - - require.Equal(t, p.LastBlock.Path.String(), p.Path.String()) - - cidLnk, castOK := p.LastBlock.Link.(cidlink.Link) - require.True(t, castOK) - - carRootCid = cidLnk.Cid - subRootFound = true - } - return nil - }, - ) - require.NoError(t, err) - - require.True(t, subRootFound) - } - - // Wait for provider completion - err = waitFor(ctx, t, providerResChan) - if tc.expectErr || tc.expectProviderCancelEvent { - require.Error(t, err) - if tc.expectProviderCancelEvent { - require.EqualError(t, err, providerCancelled.Error()) - } - } else { - require.NoError(t, err) - } - - //final verification -- the server has no active graphsync requests - stats := gsupr.GraphExchange.Stats() - require.Equal(t, stats.IncomingRequests.Active, uint64(0)) -} - -func newTestClient(t *testing.T, testData *testutil.Libp2pTestData, full api.FullNode) *gsclient.Client { - clientPath, err := os.MkdirTemp(t.TempDir(), "client") - require.NoError(t, err) - - clientNode, err := clinode.Setup(clientPath) - require.NoError(t, err) - clientNode.Host = testData.Host1 - //err = clientNode.Wallet.SetDefault(address.TestAddress2) - //require.NoError(t, err) - clientDs := namespace.Wrap(testData.Ds1, datastore.NewKey("/retrievals/client")) - addr, err := clientNode.Wallet.GetDefault() - require.NoError(t, err) - - // Create the retrieval client - fc, err := gsclient.NewClient(clientNode.Host, full, clientNode.Wallet, addr, testData.Bs1, clientDs, clientPath) - require.NoError(t, err) - return fc -} - -func waitFor(ctx context.Context, t *testing.T, resChan chan error) error { - var err error - select { - case <-ctx.Done(): - require.Fail(t, "test timed out") - case err = <-resChan: - } - return err -} - -type mockSectorAccessor struct { - unsealed bool -} - -func (m *mockSectorAccessor) IsUnsealed(ctx context.Context, minerAddr address.Address, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) { - return m.unsealed, nil -} - -func printProgress(bytesReceived uint64) { - str := fmt.Sprintf("%v (%v)", bytesReceived, humanize.IBytes(bytesReceived)) - - termWidth, _, err := term.GetSize(int(os.Stdin.Fd())) - strLen := len(str) - if err == nil { - - if strLen < termWidth { - // If the string is shorter than the terminal width, pad right side - // with spaces to remove old text - str = strings.Join([]string{str, strings.Repeat(" ", termWidth-strLen)}, "") - } else if strLen > termWidth { - // If the string doesn't fit in the terminal, 
cut it down to a size - // that fits - str = str[:termWidth] - } - } - - fmt.Fprintf(os.Stderr, "%s\r", str) -} - -// GenerateCids produces n content identifiers. -func GenerateCids(n int) []cid.Cid { - cids := make([]cid.Cid, 0, n) - for i := 0; i < n; i++ { - c := blockGenerator.Next().Cid() - cids = append(cids, c) - } - return cids -} - -var blockGenerator = blocksutil.NewBlockGenerator() From cd9bc63ce968331c68409f949257a85f359abde5 Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Wed, 20 Dec 2023 13:27:51 +0400 Subject: [PATCH 28/34] add retrieval config headers --- node/config/doc_gen.go | 20 ++++++++++++++++++++ node/config/types.go | 6 +++--- 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index 03b0a7f3d..c93a1808d 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -700,6 +700,26 @@ Set this value to "" if the local index directory data service is embedded.`, message in lotus mpool`, }, }, + "RetrievalConfig": []DocField{ + { + Name: "Graphsync", + Type: "GraphsyncRetrievalConfig", + + Comment: ``, + }, + { + Name: "Bitswap", + Type: "BitswapRetrievalConfig", + + Comment: ``, + }, + { + Name: "HTTP", + Type: "HTTPRetrievalConfig", + + Comment: ``, + }, + }, "StorageConfig": []DocField{ { Name: "ParallelFetchLimit", diff --git a/node/config/types.go b/node/config/types.go index efddc6f2f..6383855d0 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -314,9 +314,9 @@ type HttpDownloadConfig struct { } type RetrievalConfig struct { - GraphsyncRetrievalConfig - BitswapRetrievalConfig - HTTPRetrievalConfig + Graphsync GraphsyncRetrievalConfig + Bitswap BitswapRetrievalConfig + HTTP HTTPRetrievalConfig } type BitswapRetrievalConfig struct { From fd5d5a3625c5f0d8a937301b90ca71c97ca6453b Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Wed, 20 Dec 2023 13:37:20 +0400 Subject: [PATCH 29/34] update retrieval configs --- gql/resolver_rtvllog.go | 2 +- indexprovider/wrapper.go | 20 ++++++++++---------- itests/shared/multiminer.go | 2 +- node/builder.go | 8 ++++---- node/config/def.go | 6 +++--- node/modules/piecedirectory.go | 2 +- node/modules/retrieval.go | 20 ++++++++++---------- 7 files changed, 30 insertions(+), 30 deletions(-) diff --git a/gql/resolver_rtvllog.go b/gql/resolver_rtvllog.go index 6fd73f95e..be062b29d 100644 --- a/gql/resolver_rtvllog.go +++ b/gql/resolver_rtvllog.go @@ -212,6 +212,6 @@ func (r *resolver) RetrievalLogsCount(ctx context.Context, args struct{ IsIndexe count, err := r.retDB.Count(ctx, isIndexer) return &retStateCount{ Count: int32(count), - Period: gqltypes.Uint64(r.cfg.Retrievals.GraphsyncRetrievalConfig.RetrievalLogDuration), + Period: gqltypes.Uint64(r.cfg.Retrievals.Graphsync.RetrievalLogDuration), }, err } diff --git a/indexprovider/wrapper.go b/indexprovider/wrapper.go index fd3a837a4..762b47af0 100644 --- a/indexprovider/wrapper.go +++ b/indexprovider/wrapper.go @@ -83,9 +83,9 @@ func NewWrapper(cfg *config.Boost) func(lc fx.Lifecycle, h host.Host, r repo.Loc _, isDisabled := prov.(*DisabledIndexProvider) // bitswap is enabled if there is a bitswap peer id - bitswapEnabled := cfg.Retrievals.BitswapRetrievalConfig.BitswapPeerID != "" + bitswapEnabled := cfg.Retrievals.Bitswap.BitswapPeerID != "" // http is considered enabled if there is an http retrieval multiaddr set - httpEnabled := cfg.Retrievals.HTTPRetrievalConfig.HTTPRetrievalMultiaddr != "" + httpEnabled := cfg.Retrievals.HTTP.HTTPRetrievalMultiaddr != "" // setup bitswap extended provider if there is a public 
multi addr for bitswap w := &Wrapper{ @@ -375,26 +375,26 @@ func (w *Wrapper) appendExtendedProviders(ctx context.Context, adBuilder *xprovi return err } var ep xproviders.Info - if len(w.cfg.Retrievals.BitswapRetrievalConfig.BitswapPublicAddresses) > 0 { - if w.cfg.Retrievals.BitswapRetrievalConfig.BitswapPrivKeyFile == "" { + if len(w.cfg.Retrievals.Bitswap.BitswapPublicAddresses) > 0 { + if w.cfg.Retrievals.Bitswap.BitswapPrivKeyFile == "" { return fmt.Errorf("missing required configuration key BitswapPrivKeyFile: " + "boost is configured with BitswapPublicAddresses but the BitswapPrivKeyFile configuration key is empty") } // we need the private key for bitswaps peerID in order to announce publicly - keyFile, err := os.ReadFile(w.cfg.Retrievals.BitswapRetrievalConfig.BitswapPrivKeyFile) + keyFile, err := os.ReadFile(w.cfg.Retrievals.Bitswap.BitswapPrivKeyFile) if err != nil { - return fmt.Errorf("opening BitswapPrivKeyFile %s: %w", w.cfg.Retrievals.BitswapRetrievalConfig.BitswapPrivKeyFile, err) + return fmt.Errorf("opening BitswapPrivKeyFile %s: %w", w.cfg.Retrievals.Bitswap.BitswapPrivKeyFile, err) } privKey, err := crypto.UnmarshalPrivateKey(keyFile) if err != nil { - return fmt.Errorf("unmarshalling BitswapPrivKeyFile %s: %w", w.cfg.Retrievals.BitswapRetrievalConfig.BitswapPrivKeyFile, err) + return fmt.Errorf("unmarshalling BitswapPrivKeyFile %s: %w", w.cfg.Retrievals.Bitswap.BitswapPrivKeyFile, err) } // setup an extended provider record, containing the booster-bitswap multi addr, // peer ID, private key for signing, and metadata ep = xproviders.Info{ - ID: w.cfg.Retrievals.BitswapRetrievalConfig.BitswapPeerID, - Addrs: w.cfg.Retrievals.BitswapRetrievalConfig.BitswapPublicAddresses, + ID: w.cfg.Retrievals.Bitswap.BitswapPeerID, + Addrs: w.cfg.Retrievals.Bitswap.BitswapPublicAddresses, Priv: privKey, Metadata: mbytes, } @@ -432,7 +432,7 @@ func (w *Wrapper) appendExtendedProviders(ctx context.Context, adBuilder *xprovi } var ep = xproviders.Info{ ID: w.h.ID().String(), - Addrs: []string{w.cfg.Retrievals.HTTPRetrievalConfig.HTTPRetrievalMultiaddr}, + Addrs: []string{w.cfg.Retrievals.HTTP.HTTPRetrievalMultiaddr}, Metadata: mbytes, Priv: key, } diff --git a/itests/shared/multiminer.go b/itests/shared/multiminer.go index 11dc9bf79..e3980c7e8 100644 --- a/itests/shared/multiminer.go +++ b/itests/shared/multiminer.go @@ -61,7 +61,7 @@ func RunMultiminerRetrievalTest(t *testing.T, rt func(ctx context.Context, t *te // Set up the second boost instance so that it can read sector data // not only from the second miner, but also from the first miner - cfg.Retrievals.GraphsyncRetrievalConfig.GraphsyncStorageAccessApiInfo = []string{cfg.SectorIndexApiInfo, miner1ApiInfo} + cfg.Retrievals.Graphsync.GraphsyncStorageAccessApiInfo = []string{cfg.SectorIndexApiInfo, miner1ApiInfo} // Set up some other ports so they don't clash cfg.Graphql.Port = 8081 diff --git a/node/builder.go b/node/builder.go index 97d85c61a..d724ec68e 100644 --- a/node/builder.go +++ b/node/builder.go @@ -503,7 +503,7 @@ func ConfigBoost(cfg *config.Boost) Option { Override(new(dtypes.ProviderTransferNetwork), modules.NewProviderTransferNetwork), Override(StartProviderDataTransferKey, server.NewProviderDataTransfer), Override(new(server.RetrievalAskGetter), server.NewRetrievalAskGetter), - Override(new(*server.GraphsyncUnpaidRetrieval), modules.RetrievalGraphsync(cfg.Retrievals.GraphsyncRetrievalConfig.SimultaneousTransfersForRetrieval)), + Override(new(*server.GraphsyncUnpaidRetrieval), 
modules.RetrievalGraphsync(cfg.Retrievals.Graphsync.SimultaneousTransfersForRetrieval)), Override(new(dtypes.StagingGraphsync), From(new(*server.GraphsyncUnpaidRetrieval))), Override(StartPieceDoctorKey, modules.NewPieceDoctor), @@ -516,7 +516,7 @@ func ConfigBoost(cfg *config.Boost) Option { // Lotus Markets (retrieval) Override(new(server.SectorAccessor), modules.NewSectorAccessor(cfg)), - Override(HandleRetrievalEventsKey, modules.HandleRetrievalGraphsyncUpdates(time.Duration(cfg.Retrievals.GraphsyncRetrievalConfig.RetrievalLogDuration), time.Duration(cfg.Retrievals.GraphsyncRetrievalConfig.StalledRetrievalTimeout))), + Override(HandleRetrievalEventsKey, modules.HandleRetrievalGraphsyncUpdates(time.Duration(cfg.Retrievals.Graphsync.RetrievalLogDuration), time.Duration(cfg.Retrievals.Graphsync.StalledRetrievalTimeout))), Override(HandleRetrievalAskKey, modules.HandleQueryAsk), Override(new(*lp2pimpl.TransportsListener), modules.NewTransportsListener(cfg)), Override(new(*protocolproxy.ProtocolProxy), modules.NewProtocolProxy(cfg)), @@ -539,8 +539,8 @@ func ConfigBoost(cfg *config.Boost) Option { // Boost retrieval deal filter Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(nil)), - If(cfg.Retrievals.GraphsyncRetrievalConfig.RetrievalFilter != "", - Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(dtypes.RetrievalDealFilter(dealfilter.CliRetrievalDealFilter(cfg.Retrievals.GraphsyncRetrievalConfig.RetrievalFilter)))), + If(cfg.Retrievals.Graphsync.RetrievalFilter != "", + Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(dtypes.RetrievalDealFilter(dealfilter.CliRetrievalDealFilter(cfg.Retrievals.Graphsync.RetrievalFilter)))), ), Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(&cfg.Dealpublish.MaxPublishDealsFee, storageadapter.PublishMsgConfig{ diff --git a/node/config/def.go b/node/config/def.go index ec91921c7..3570935dd 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -154,16 +154,16 @@ func DefaultBoost() *Boost { AllowPrivateIPs: false, }, Retrievals: RetrievalConfig{ - GraphsyncRetrievalConfig{ + Graphsync: GraphsyncRetrievalConfig{ SimultaneousTransfersForRetrieval: DefaultSimultaneousTransfers, RetrievalLogDuration: Duration(time.Hour * 24), StalledRetrievalTimeout: Duration(time.Second * 30), GraphsyncStorageAccessApiInfo: []string{}, }, - BitswapRetrievalConfig{ + Bitswap: BitswapRetrievalConfig{ BitswapPublicAddresses: []string{}, }, - HTTPRetrievalConfig{ + HTTP: HTTPRetrievalConfig{ HTTPRetrievalMultiaddr: "", }, }, diff --git a/node/modules/piecedirectory.go b/node/modules/piecedirectory.go index 5218004ea..5365ead0a 100644 --- a/node/modules/piecedirectory.go +++ b/node/modules/piecedirectory.go @@ -107,7 +107,7 @@ func NewMultiminerSectorAccessor(cfg *config.Boost) func(full v1api.FullNode) *l return func(full v1api.FullNode) *lib.MultiMinerAccessor { // Get the endpoints of all the miners that this boost node can query // for retrieval data when serving graphsync retrievals - storageApiInfos := cfg.Retrievals.GraphsyncRetrievalConfig.GraphsyncStorageAccessApiInfo + storageApiInfos := cfg.Retrievals.Graphsync.GraphsyncStorageAccessApiInfo if len(storageApiInfos) == 0 { // If the endpoints aren't explicitly configured, fall back to just // serving retrieval data from the same endpoint where data is stored to diff --git a/node/modules/retrieval.go b/node/modules/retrieval.go index 32f1e2742..22ea50cca 100644 --- a/node/modules/retrieval.go +++ b/node/modules/retrieval.go @@ 
-27,7 +27,7 @@ import ( // based off the host and boost config func bitswapMultiAddrs(cfg *config.Boost, h host.Host) ([]multiaddr.Multiaddr, error) { // if BitswapPublicAddresses is empty, that means we'll be serving bitswap directly from this host, so just return host multiaddresses - if len(cfg.Retrievals.BitswapRetrievalConfig.BitswapPublicAddresses) == 0 { + if len(cfg.Retrievals.Bitswap.BitswapPublicAddresses) == 0 { maddr, err := peer.AddrInfoToP2pAddrs(&peer.AddrInfo{ ID: h.ID(), Addrs: h.Addrs(), @@ -43,7 +43,7 @@ func bitswapMultiAddrs(cfg *config.Boost, h host.Host) ([]multiaddr.Multiaddr, e // parse all of the public multiaddrs var addrs []multiaddr.Multiaddr - for _, addrString := range cfg.Retrievals.BitswapRetrievalConfig.BitswapPublicAddresses { + for _, addrString := range cfg.Retrievals.Bitswap.BitswapPublicAddresses { addr, err := multiaddr.NewMultiaddr(addrString) if err != nil { return nil, fmt.Errorf("Could not parse bitswap address '%s' as multiaddr: %w", addrString, err) @@ -52,9 +52,9 @@ func bitswapMultiAddrs(cfg *config.Boost, h host.Host) ([]multiaddr.Multiaddr, e } // in order to make these multiaddrs fully dialable, we encapsulate the bitswap peer id inside of them - bsPeerID, err := peer.Decode(cfg.Retrievals.BitswapRetrievalConfig.BitswapPeerID) + bsPeerID, err := peer.Decode(cfg.Retrievals.Bitswap.BitswapPeerID) if err != nil { - return nil, fmt.Errorf("Could not parse bitswap peer id '%s': %w", cfg.Retrievals.BitswapRetrievalConfig.BitswapPeerID, err) + return nil, fmt.Errorf("Could not parse bitswap peer id '%s': %w", cfg.Retrievals.Bitswap.BitswapPeerID, err) } return peer.AddrInfoToP2pAddrs(&peer.AddrInfo{ ID: bsPeerID, @@ -83,12 +83,12 @@ func NewTransportsListener(cfg *config.Boost) func(h host.Host) (*lp2pimpl.Trans // If there's an http retrieval address specified, add HTTP to the list // of supported protocols - if cfg.Retrievals.HTTPRetrievalConfig.HTTPRetrievalMultiaddr != "" { - maddr, err := multiaddr.NewMultiaddr(cfg.Retrievals.HTTPRetrievalConfig.HTTPRetrievalMultiaddr) + if cfg.Retrievals.HTTP.HTTPRetrievalMultiaddr != "" { + maddr, err := multiaddr.NewMultiaddr(cfg.Retrievals.HTTP.HTTPRetrievalMultiaddr) if err != nil { msg := "HTTPRetrievalURL must be in multi-address format. 
" msg += "Could not parse '%s' as multiaddr: %w" - return nil, fmt.Errorf(msg, cfg.Retrievals.HTTPRetrievalConfig.HTTPRetrievalMultiaddr, err) + return nil, fmt.Errorf(msg, cfg.Retrievals.HTTP.HTTPRetrievalMultiaddr, err) } protos = append(protos, types.Protocol{ Name: "http", @@ -98,7 +98,7 @@ func NewTransportsListener(cfg *config.Boost) func(h host.Host) (*lp2pimpl.Trans // If there's a bitswap peer address specified, add bitswap to the list // of supported protocols - if cfg.Retrievals.BitswapRetrievalConfig.BitswapPeerID != "" { + if cfg.Retrievals.Bitswap.BitswapPeerID != "" { addrs, err := bitswapMultiAddrs(cfg, h) if err != nil { return nil, err @@ -183,8 +183,8 @@ func NewProtocolProxy(cfg *config.Boost) func(h host.Host) (*protocolproxy.Proto return func(h host.Host) (*protocolproxy.ProtocolProxy, error) { peerConfig := map[peer.ID][]protocol.ID{} // add bitswap if a peer id is set AND the peer is only private - if cfg.Retrievals.BitswapRetrievalConfig.BitswapPeerID != "" && len(cfg.Retrievals.BitswapRetrievalConfig.BitswapPublicAddresses) == 0 { - bsPeerID, err := peer.Decode(cfg.Retrievals.BitswapRetrievalConfig.BitswapPeerID) + if cfg.Retrievals.Bitswap.BitswapPeerID != "" && len(cfg.Retrievals.Bitswap.BitswapPublicAddresses) == 0 { + bsPeerID, err := peer.Decode(cfg.Retrievals.Bitswap.BitswapPeerID) if err != nil { return nil, err } From 1f28a5b547f9fb645065ba0c68be7552f3ba5d55 Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Wed, 20 Dec 2023 16:05:06 +0400 Subject: [PATCH 30/34] fix storage ask db command --- storagemarket/storedask/db.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/storagemarket/storedask/db.go b/storagemarket/storedask/db.go index ec9356e3a..ff05a3145 100644 --- a/storagemarket/storedask/db.go +++ b/storagemarket/storedask/db.go @@ -47,10 +47,12 @@ func (s *StorageAskDB) Update(ctx context.Context, ask legacytypes.StorageAsk) e err := row.Scan(&minerString) switch { case errors.Is(err, sql.ErrNoRows): + log.Debugf("inserting a new storage ask in db for miner: %s", minerString) return s.set(ctx, ask) case err != nil: return err default: + log.Debugf("updating the storage ask in db for miner: %s", minerString) return s.update(ctx, ask) } } @@ -64,9 +66,7 @@ func (s *StorageAskDB) set(ctx context.Context, ask legacytypes.StorageAsk) erro } func (s *StorageAskDB) update(ctx context.Context, ask legacytypes.StorageAsk) error { - qry := "UPDATE StorageAsk (Price, VerifiedPrice, MinPieceSize, MaxPieceSize, TS, Expiry, SeqNo) " - qry += "VALUES (?, ?, ?, ?, ?, ?, ?, ?) " - qry += "WHERE Miner=?" + qry := "UPDATE StorageAsk SET Price=?, VerifiedPrice=?, MinPieceSize=?, MaxPieceSize=?, TS=?, Expiry=?, SeqNo=? WHERE Miner=?" values := []interface{}{ask.Price.Int64(), ask.VerifiedPrice.Int64(), ask.MinPieceSize, ask.MaxPieceSize, ask.Timestamp, ask.Expiry, ask.SeqNo, ask.Miner.String()} _, err := s.db.ExecContext(ctx, qry, values...) 
return err From 30a239084e97363ac7364496b5e002c11434bc5a Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Mon, 15 Jan 2024 14:58:54 +0400 Subject: [PATCH 31/34] fix BoostLegacyDealByProposalCid api --- node/impl/boost.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/node/impl/boost.go b/node/impl/boost.go index 58587c925..59d6fcd31 100644 --- a/node/impl/boost.go +++ b/node/impl/boost.go @@ -11,6 +11,7 @@ import ( "github.com/filecoin-project/boost/lib/legacy" "github.com/filecoin-project/boost/node/impl/backupmgr" "github.com/filecoin-project/boost/piecedirectory" + "github.com/filecoin-project/boost/storagemarket/types/legacytypes" "github.com/multiformats/go-multihash" "go.opentelemetry.io/otel/attribute" @@ -172,8 +173,8 @@ func (sm *BoostAPI) BoostIndexerAnnounceDealRemoved(ctx context.Context, propCid return sm.IndexProvider.AnnounceBoostDealRemoved(ctx, propCid) } -func (sm *BoostAPI) BoostLegacyDealByProposalCid(ctx context.Context, propCid cid.Cid) (gfm_storagemarket.MinerDeal, error) { - return sm.LegacyStorageProvider.GetLocalDeal(propCid) +func (sm *BoostAPI) BoostLegacyDealByProposalCid(ctx context.Context, propCid cid.Cid) (legacytypes.MinerDeal, error) { + return sm.LegacyDealManager.ByPropCid(propCid) } func (sm *BoostAPI) BoostOfflineDealWithData(ctx context.Context, dealUuid uuid.UUID, filePath string, delAfterImport bool) (*api.ProviderDealRejectionInfo, error) { From 62948a7b7923599f9385f2899845c79379288414 Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Mon, 15 Jan 2024 15:23:26 +0400 Subject: [PATCH 32/34] make cbor-gen --- .../internal/internalchannel_cbor_gen.go | 44 ++++-- .../message1_1/transfer_message_cbor_gen.go | 4 +- .../message1_1/transfer_request_cbor_gen.go | 6 +- .../message1_1/transfer_response_cbor_gen.go | 141 ++++++++++-------- datatransfer/types_cbor_gen.go | 64 ++++++-- .../migrations/migrations_cbor_gen.go | 50 +++++-- .../contract_deal_proposal_types_cbor_gen.go | 2 +- .../migrations_mapenc_types_cbor_gen.go | 16 +- .../types/legacytypes/types_cbor_gen.go | 79 +++++++--- 9 files changed, 268 insertions(+), 138 deletions(-) diff --git a/datatransfer/channels/internal/internalchannel_cbor_gen.go b/datatransfer/channels/internal/internalchannel_cbor_gen.go index ca27388a3..343eaa94c 100644 --- a/datatransfer/channels/internal/internalchannel_cbor_gen.go +++ b/datatransfer/channels/internal/internalchannel_cbor_gen.go @@ -236,6 +236,7 @@ func (t *ChannelState) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.Initiator (peer.ID) (string) @@ -362,6 +363,7 @@ func (t *ChannelState) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.SentBlocksTotal (int64) (int64) @@ -628,15 +630,24 @@ func (t *ChannelState) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - var v EncodedVoucher - if err := v.UnmarshalCBOR(cr); err != nil { - return err - } + { - t.Vouchers[i] = v - } + if err := t.Vouchers[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Vouchers[i]: %w", err) + } + + } + } + } // t.Initiator (peer.ID) (string) case "Initiator": @@ -721,15 +732,24 @@ func (t *ChannelState) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - var v EncodedVoucherResult - if err := v.UnmarshalCBOR(cr); err != nil 
{ - return err - } + { - t.VoucherResults[i] = v - } + if err := t.VoucherResults[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.VoucherResults[i]: %w", err) + } + } + + } + } // t.SentBlocksTotal (int64) (int64) case "SentBlocksTotal": { diff --git a/datatransfer/message/message1_1/transfer_message_cbor_gen.go b/datatransfer/message/message1_1/transfer_message_cbor_gen.go index dcf3a6d15..13d462b29 100644 --- a/datatransfer/message/message1_1/transfer_message_cbor_gen.go +++ b/datatransfer/message/message1_1/transfer_message_cbor_gen.go @@ -8,9 +8,9 @@ import ( "math" "sort" - "github.com/ipfs/go-cid" + cid "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" + xerrors "golang.org/x/xerrors" ) var _ = xerrors.Errorf diff --git a/datatransfer/message/message1_1/transfer_request_cbor_gen.go b/datatransfer/message/message1_1/transfer_request_cbor_gen.go index 53e63742a..3e3214ceb 100644 --- a/datatransfer/message/message1_1/transfer_request_cbor_gen.go +++ b/datatransfer/message/message1_1/transfer_request_cbor_gen.go @@ -8,10 +8,10 @@ import ( "math" "sort" - "github.com/filecoin-project/boost/datatransfer" - "github.com/ipfs/go-cid" + datatransfer "github.com/filecoin-project/boost/datatransfer" + cid "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" + xerrors "golang.org/x/xerrors" ) var _ = xerrors.Errorf diff --git a/datatransfer/message/message1_1/transfer_response_cbor_gen.go b/datatransfer/message/message1_1/transfer_response_cbor_gen.go index 5bdcd004b..35ec7f90d 100644 --- a/datatransfer/message/message1_1/transfer_response_cbor_gen.go +++ b/datatransfer/message/message1_1/transfer_response_cbor_gen.go @@ -5,16 +5,18 @@ package message1_1 import ( "fmt" "io" + "math" "sort" - "github.com/filecoin-project/boost/datatransfer" - "github.com/ipfs/go-cid" + datatransfer "github.com/filecoin-project/boost/datatransfer" + cid "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" + xerrors "golang.org/x/xerrors" ) var _ = xerrors.Errorf var _ = cid.Undef +var _ = math.E var _ = sort.Sort func (t *TransferResponse1_1) MarshalCBOR(w io.Writer) error { @@ -22,25 +24,10 @@ func (t *TransferResponse1_1) MarshalCBOR(w io.Writer) error { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{166}); err != nil { - return err - } - scratch := make([]byte, 9) + cw := cbg.NewCborWriter(w) - // t.Type (uint64) (uint64) - if len("Type") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Type\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Type"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("Type")); err != nil { - return err - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Type)); err != nil { + if _, err := cw.Write([]byte{166}); err != nil { return err } @@ -49,10 +36,10 @@ func (t *TransferResponse1_1) MarshalCBOR(w io.Writer) error { return xerrors.Errorf("Value in field \"Acpt\" was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Acpt"))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Acpt"))); err != nil { return err } - if _, err := io.WriteString(w, string("Acpt")); err != nil { + if _, err := cw.WriteString(string("Acpt")); err != nil { return err } @@ -65,10 +52,10 @@ func (t *TransferResponse1_1) MarshalCBOR(w io.Writer) 
error { return xerrors.Errorf("Value in field \"Paus\" was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Paus"))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Paus"))); err != nil { return err } - if _, err := io.WriteString(w, string("Paus")); err != nil { + if _, err := cw.WriteString(string("Paus")); err != nil { return err } @@ -76,19 +63,19 @@ func (t *TransferResponse1_1) MarshalCBOR(w io.Writer) error { return err } - // t.XferID (uint64) (uint64) - if len("XferID") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"XferID\" was too long") + // t.Type (uint64) (uint64) + if len("Type") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Type\" was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("XferID"))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Type"))); err != nil { return err } - if _, err := io.WriteString(w, string("XferID")); err != nil { + if _, err := cw.WriteString(string("Type")); err != nil { return err } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.XferID)); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Type)); err != nil { return err } @@ -97,14 +84,14 @@ func (t *TransferResponse1_1) MarshalCBOR(w io.Writer) error { return xerrors.Errorf("Value in field \"VRes\" was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("VRes"))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("VRes"))); err != nil { return err } - if _, err := io.WriteString(w, string("VRes")); err != nil { + if _, err := cw.WriteString(string("VRes")); err != nil { return err } - if err := t.VRes.MarshalCBOR(w); err != nil { + if err := t.VRes.MarshalCBOR(cw); err != nil { return err } @@ -113,10 +100,10 @@ func (t *TransferResponse1_1) MarshalCBOR(w io.Writer) error { return xerrors.Errorf("Value in field \"VTyp\" was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("VTyp"))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("VTyp"))); err != nil { return err } - if _, err := io.WriteString(w, string("VTyp")); err != nil { + if _, err := cw.WriteString(string("VTyp")); err != nil { return err } @@ -124,25 +111,47 @@ func (t *TransferResponse1_1) MarshalCBOR(w io.Writer) error { return xerrors.Errorf("Value in field t.VTyp was too long") } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.VTyp))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.VTyp))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.VTyp)); err != nil { + return err + } + + // t.XferID (uint64) (uint64) + if len("XferID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"XferID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("XferID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("XferID")); err != nil { return err } - if _, err := io.WriteString(w, string(t.VTyp)); err != nil { + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.XferID)); err != nil { return err } + return nil } -func (t *TransferResponse1_1) UnmarshalCBOR(r io.Reader) error { +func (t *TransferResponse1_1) UnmarshalCBOR(r io.Reader) (err error) { *t = TransferResponse1_1{} - br := 
cbg.GetPeeker(r) - scratch := make([]byte, 8) + cr := cbg.NewCborReader(r) - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + maj, extra, err := cr.ReadHeader() if err != nil { return err } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + if maj != cbg.MajMap { return fmt.Errorf("cbor input should be of type map") } @@ -157,7 +166,7 @@ func (t *TransferResponse1_1) UnmarshalCBOR(r io.Reader) error { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadStringBuf(br, scratch) + sval, err := cbg.ReadString(cr) if err != nil { return err } @@ -166,25 +175,10 @@ func (t *TransferResponse1_1) UnmarshalCBOR(r io.Reader) error { } switch name { - // t.Type (uint64) (uint64) - case "Type": - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Type = uint64(extra) - - } - // t.Acpt (bool) (bool) + // t.Acpt (bool) (bool) case "Acpt": - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + maj, extra, err = cr.ReadHeader() if err != nil { return err } @@ -202,7 +196,7 @@ func (t *TransferResponse1_1) UnmarshalCBOR(r io.Reader) error { // t.Paus (bool) (bool) case "Paus": - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + maj, extra, err = cr.ReadHeader() if err != nil { return err } @@ -217,19 +211,19 @@ func (t *TransferResponse1_1) UnmarshalCBOR(r io.Reader) error { default: return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) } - // t.XferID (uint64) (uint64) - case "XferID": + // t.Type (uint64) (uint64) + case "Type": { - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + maj, extra, err = cr.ReadHeader() if err != nil { return err } if maj != cbg.MajUnsignedInt { return fmt.Errorf("wrong type for uint64 field") } - t.XferID = uint64(extra) + t.Type = uint64(extra) } // t.VRes (typegen.Deferred) (struct) @@ -239,7 +233,7 @@ func (t *TransferResponse1_1) UnmarshalCBOR(r io.Reader) error { t.VRes = new(cbg.Deferred) - if err := t.VRes.UnmarshalCBOR(br); err != nil { + if err := t.VRes.UnmarshalCBOR(cr); err != nil { return xerrors.Errorf("failed to read deferred field: %w", err) } } @@ -247,13 +241,28 @@ func (t *TransferResponse1_1) UnmarshalCBOR(r io.Reader) error { case "VTyp": { - sval, err := cbg.ReadStringBuf(br, scratch) + sval, err := cbg.ReadString(cr) if err != nil { return err } t.VTyp = datatransfer.TypeIdentifier(sval) } + // t.XferID (uint64) (uint64) + case "XferID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.XferID = uint64(extra) + + } default: // Field doesn't exist on this type, so ignore it diff --git a/datatransfer/types_cbor_gen.go b/datatransfer/types_cbor_gen.go index f28216574..e212d360b 100644 --- a/datatransfer/types_cbor_gen.go +++ b/datatransfer/types_cbor_gen.go @@ -152,6 +152,7 @@ func (t *ChannelStages) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } return nil } @@ -199,15 +200,34 @@ func (t *ChannelStages) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Stages[i] = new(ChannelStage) + if err := 
t.Stages[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Stages[i] pointer: %w", err) + } + } + + } - var v ChannelStage - if err := v.UnmarshalCBOR(cr); err != nil { - return err } - - t.Stages[i] = &v } - return nil } @@ -271,6 +291,7 @@ func (t *ChannelStage) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } return nil } @@ -356,15 +377,34 @@ func (t *ChannelStage) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Logs[i] = new(Log) + if err := t.Logs[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Logs[i] pointer: %w", err) + } + } + + } - var v Log - if err := v.UnmarshalCBOR(cr); err != nil { - return err } - - t.Logs[i] = &v } - return nil } diff --git a/markets/piecestore/migrations/migrations_cbor_gen.go b/markets/piecestore/migrations/migrations_cbor_gen.go index 76f5bda29..ff2513f6f 100644 --- a/markets/piecestore/migrations/migrations_cbor_gen.go +++ b/markets/piecestore/migrations/migrations_cbor_gen.go @@ -8,10 +8,10 @@ import ( "math" "sort" - "github.com/filecoin-project/go-state-types/abi" - "github.com/ipfs/go-cid" + abi "github.com/filecoin-project/go-state-types/abi" + cid "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" + xerrors "golang.org/x/xerrors" ) var _ = xerrors.Errorf @@ -51,6 +51,7 @@ func (t *PieceInfo0) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } return nil } @@ -110,15 +111,24 @@ func (t *PieceInfo0) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - var v DealInfo0 - if err := v.UnmarshalCBOR(cr); err != nil { - return err - } + { - t.Deals[i] = v - } + if err := t.Deals[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Deals[i]: %w", err) + } + } + + } + } return nil } @@ -435,6 +445,7 @@ func (t *CIDInfo0) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } return nil } @@ -494,14 +505,23 @@ func (t *CIDInfo0) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - var v PieceBlockLocation0 - if err := v.UnmarshalCBOR(cr); err != nil { - return err - } + { - t.PieceBlockLocations[i] = v - } + if err := t.PieceBlockLocations[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PieceBlockLocations[i]: %w", err) + } + } + + } + } return nil } diff --git a/storagemarket/types/contract_deal_proposal_types_cbor_gen.go b/storagemarket/types/contract_deal_proposal_types_cbor_gen.go index 974e23cba..4a218c407 100644 --- a/storagemarket/types/contract_deal_proposal_types_cbor_gen.go +++ b/storagemarket/types/contract_deal_proposal_types_cbor_gen.go @@ -40,7 +40,7 @@ func (t *ContractParamsVersion1) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.LocationRef))); err != nil { return err } - if _, err := io.WriteString(w, string(t.LocationRef)); err != nil { + if _, err := cw.WriteString(string(t.LocationRef)); err != nil { return err } diff --git 
a/storagemarket/types/legacytypes/migrations/migrations_mapenc_types_cbor_gen.go b/storagemarket/types/legacytypes/migrations/migrations_mapenc_types_cbor_gen.go index 0b1cbf2a8..56dbc9fc3 100644 --- a/storagemarket/types/legacytypes/migrations/migrations_mapenc_types_cbor_gen.go +++ b/storagemarket/types/legacytypes/migrations/migrations_mapenc_types_cbor_gen.go @@ -8,15 +8,15 @@ import ( "math" "sort" - "github.com/filecoin-project/boost/datatransfer" - "github.com/filecoin-project/boost/storagemarket/types/legacytypes" - "github.com/filecoin-project/boost/storagemarket/types/legacytypes/filestore" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p/core/peer" + datatransfer "github.com/filecoin-project/boost/datatransfer" + legacytypes "github.com/filecoin-project/boost/storagemarket/types/legacytypes" + filestore "github.com/filecoin-project/boost/storagemarket/types/legacytypes/filestore" + abi "github.com/filecoin-project/go-state-types/abi" + market "github.com/filecoin-project/specs-actors/actors/builtin/market" + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p/core/peer" cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" + xerrors "golang.org/x/xerrors" ) var _ = xerrors.Errorf diff --git a/storagemarket/types/legacytypes/types_cbor_gen.go b/storagemarket/types/legacytypes/types_cbor_gen.go index 0b2b3d00c..f3cbe405b 100644 --- a/storagemarket/types/legacytypes/types_cbor_gen.go +++ b/storagemarket/types/legacytypes/types_cbor_gen.go @@ -8,15 +8,15 @@ import ( "math" "sort" - "github.com/filecoin-project/boost/datatransfer" - "github.com/filecoin-project/boost/storagemarket/types/legacytypes/filestore" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/builtin/v9/market" - "github.com/filecoin-project/go-state-types/crypto" - "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p/core/peer" + datatransfer "github.com/filecoin-project/boost/datatransfer" + filestore "github.com/filecoin-project/boost/storagemarket/types/legacytypes/filestore" + abi "github.com/filecoin-project/go-state-types/abi" + market "github.com/filecoin-project/go-state-types/builtin/v9/market" + crypto "github.com/filecoin-project/go-state-types/crypto" + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p/core/peer" cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" + xerrors "golang.org/x/xerrors" ) var _ = xerrors.Errorf @@ -2666,6 +2666,7 @@ func (t *DealStages) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } return nil } @@ -2729,13 +2730,33 @@ func (t *DealStages) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Stages[i] = new(DealStage) + if err := t.Stages[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Stages[i] pointer: %w", err) + } + } - var v DealStage - if err := v.UnmarshalCBOR(cr); err != nil { - return err - } + } - t.Stages[i] = &v + } } default: @@ -2781,6 +2802,7 @@ func (t *DealStage) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.Name (string) (string) @@ -2945,15 +2967,34 
@@ func (t *DealStage) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Logs[i] = new(Log) + if err := t.Logs[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Logs[i] pointer: %w", err) + } + } - var v Log - if err := v.UnmarshalCBOR(cr); err != nil { - return err - } + } - t.Logs[i] = &v + } } - // t.Name (string) (string) case "Name": From dfb23e99d03c604a59123e982f1076f636a182f3 Mon Sep 17 00:00:00 2001 From: LexLuthr Date: Mon, 15 Jan 2024 15:39:40 +0400 Subject: [PATCH 33/34] fix missing api example --- api/docgen/docgen.go | 2 + build/openrpc/boost.json.gz | Bin 2560 -> 2992 bytes documentation/en/api-v1-methods.md | 1211 ++++++++++++++++++++++++++++ 3 files changed, 1213 insertions(+) diff --git a/api/docgen/docgen.go b/api/docgen/docgen.go index 944419058..0af0731bb 100644 --- a/api/docgen/docgen.go +++ b/api/docgen/docgen.go @@ -14,6 +14,7 @@ import ( "github.com/filecoin-project/boost/datatransfer" "github.com/filecoin-project/boost/retrievalmarket/types/legacyretrievaltypes" + "github.com/filecoin-project/boost/storagemarket/types/legacytypes/filestore" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-jsonrpc/auth" @@ -136,6 +137,7 @@ func init() { allocationID := verifreg.AllocationId(0) addExample(allocationID) addExample(&allocationID) + addExample(filestore.Path("")) addExample(map[string]int{"name": 42}) addExample(map[string]time.Time{"name": time.Unix(1615243938, 0).UTC()}) addExample(&types.ExecutionTrace{ diff --git a/build/openrpc/boost.json.gz b/build/openrpc/boost.json.gz index 047a211ed3e0bdf20fc3176120ce06a1386f3a41..c76d48e4a13fa7baa3506bbf7edfbeeddc565627 100644 GIT binary patch literal 2992 zcmV;h3s3YPiwFP!00000|Lk3TQ`U`@n>6<2G*8nzci0VakRa zx9_l`$_3oUN1RKAdgsTebKb=kz5yXAXSj`ZVjkqIm`|}-U#M86hQ5S4Z zS?$Q>_O(=8zU)Ouv{lX?+*w?RLv4v&ec|?qH z8()k-03req;|h&IA;QMU=AH*kNi^gFsS%YZVrmUr*e8N`5mN%8nL8No`CQVFhw;aA z%=a4^M#pTL+ll_h9)U;F*B$bfDb#g@2MIE10=xX3Xn_LHMVddT-?SZrF_}m-@gin)NJnU z4`Vdg^9BDKY_&hodcHq!`C#88Qi0efKHc{Pza9VY3Rvxm|L3gn`Vdbh)(S}J)9+a> z7Kar*eKoXDrV8VVrf9~N`r3dJ?=l}$DwOy!QG&9p&5WL{xF8mI1H+|!3?FOG5;Pr> z;y|O>N^L(L@svTNS%0+(L{5oZ7eG#lTn!!r&fQ|@aZ02aIU;VZg&xrsE97`?$f2b! 
z!`b-ZYTrVyl-iA+2ZTLsBxwAUg9Li=36LzimZLy2Ee`>LfI78LpSmZzXy=9WG%scC zT<{y}fY1~BimZrNk|oTjz^Gda6H*(#cRZLLzL+ape--`NGyIeM(e5o(5HG!sq088{ zOACACwi^*`99r0;EcIP7o(Mt48|Y&G?PvDAbm(eE-eEodQ$QWuF2+;~9}Cb2=qNp< zUT^G$e=q9wcKG)%J;bHJLfJVN+~<S>kgV{-eu)vSzqz``%AT-}`qj}Q$h4bIvf4}|y%U^%q!08{#e}CAK;?vJZ_V2f+ zNALR0Q~8#^zql12&TfABLk+PN3XG=O<6Ld;_PT7q1LkOuJts=DFS2OphPZ*2YII=h*I$n6Rg-_071j@3`!jZ%rG&S!>gfe|RP2=T(mZ9@t;8HmX1(gM^YvDfF ziiZq*ZQ);sLkTJoxHDi^YLt9;JOcaL=i2*jH`3oOQ=GR#WVhb~6)G_a6vr0+cr;cZ zdtd{-(Kzzj!taPwdK875nXGph;{mhbf_Kk9cB7{nZiwTA&>Sfz2t_p%#Aeq|J-W+~ zxQiifYTy^SX298aM*RI`o-yUdcfuLkQzM#(4pFeDs0XuIu^Tj5kbmR8jTlveIR;fN zF+16q2M!It7*IyU_${-!qmQ7KO2T2N9GSyK3b2}av$=er9iI>d=<$o^*brt}sKwxleLv^guXTeE2^yXw3p&MIS8OihA2 zZ@%sfDnD~filW4#F)MfVGvDbh@iZ3$6|-NtlJBd%%-G4C#5Tjgjs>G&!oZBxHIPYp z7A6vj(JLZNiG>hSwayPW+cOtwnKgn%b3l z5=->+bMoj(hx*OeD?puQ||K?T@rS1L>==cOl- z0B$xiy6#=H^CBJ8^+2D8a@J)-zB*qqrMATsMfxoFo|VYsOPRR#Qy2{tjB7it%WV>J z_yo7>iBAy0`*xz7NXt8pGZ$_ai81p7qZ4pJL1(BybUl5MiT3rt<2Ue$Ngysb$J`$$ zpY*S(uO0ftyvM$8zI{!^mYv8rXCYJM9ms;Pn43;%$+3PoLHY*Iq zQKcNmb4E=rRXxK_sfvab10bWm^%KP%?xe zyHQAy8&Tv?GRGGsLyP=fKJ;97*Z?Etu%KN&3*l{tmd97GNg8-%Zt7ukm0i z9Uti4y`_XXg@WPF^D*z`{0j#bh52%OF9@iBr~;ybK-8+|y%jB`YiCrdDN|vCoWcU3 zA}?X|rk6RLSQL*IR$KapM#C~|LsRyYRTTFlYqt4)j0TA^m=p7;BDUq|+nYBFHf8&- zvstuAYZ|FXihYf|m$~W5XK+RAan!!JSzl)krPb)?h7+Tzh7XiQN%@uNm$Tdw_%p6k z9{%#rS>fMv!#|@)Wmugzs>lzTs^Xe^Dz2H;Qf#$GxZ^nQ0_Mn&Xw-9<^fk<$ghH@> z59jnk)|?+T7`G3au>i!C$=JdlKoN&1GJDc@i5x8yGn?8KKY*0v%1}uZa-#NRG$8G_4KRunW}xJYM-gvXVwcdTFc(z8!Mo`w%@55 zg?6}n#9^Sa^4IxT$Zw*E{h%eaiJ!0-D0-cZd(7+uLY}+`iDj8mpQL3`;}?vIU@FTP_h&{Ju^2Q6qh~c_B5u6 zY}&)Pu%XBH`<)*gP^u=%v({nGtAx&-qky_jSY7&#i|LOLN@+k2k(EVgx!RCJa8t#| m-{)h~k*enGk=2|%H@tY&#FNS8Eh=A literal 2560 zcmV+b3jg&ViwFP!00000|Lk3BQ`i9cw}J@)POq@w=W(rBc2}hmbzeb z%4&Nqw{NB50^Whb7Ir||g8C{VaeaM#!!KDsU^ZHl=v6q`sS<&$*3s0^HNTE*&J^6M z+J5w33;7LXQ>V61oprQ!an&0tun-8r#X5SpWS5Hek*sx?0>Owo%?sMuL5#Xlze^=R z1%jbh48Fyb)-7}>#7C}91K;ODfunnl^sl&c9_cl|u6bL(WTQ)VeSM8BECbIYVu;)L zYybif5r7z1Xb1`sHbge}JYY(qJ{L$0s6>%aYuLg*5yXpx5(q8a-f+X`l7=#jA5RJ2 zZ)5}=i)o4z{lOl9N7CCJ%9bhAaeyh>ryw%4_Njve_yT0eF+mdC{zlZNN7+rZPfQ`= zI0B>u2hlOL8tN!S!*zP#pxzJ>^jD-(79wb$J7A-=Zv$!%5M>Uvi2^-`7r2Vr`ss#G z>|1cgv#6mD>xeK15%fX#8W1_qa)W!~`7T7F+0=3){b95Bo!CHH-=s6x9=1qpTlHG< z7CiqezyH?{hTQYN_Pjm}`>m}$Z3V;aAb_pr-4-5QSy)eo+ZR|WLD>zy8d+GtzX28C z;I`HreV&_e<|t^m1|-C_#|!o_VoP|BC>&CQ#zOU>rqWKb7C1~6wC4ok> zm0~~c@iT)+v))_;B1c3n3m`{CE(VVt=Wa3dI3m)F9Fa7aLXU{W3ORl^t_?7(A;Vo4VPrZzz%lNe`3wz|g z6A^7}TiBy4^(pC3gdoF5*kbkx0)9I3xq~^Cp6Vq z{t;uijr;XRv$cgSoX9QRc1(KUMp8RfM5sfb+XGFuQEyoIkU7&0r-}=516@IF=z@qJ z;(ChuK6hQBKxj_F2N5GP&7-9Kl-@8B8Pfwk3$?0g;RB)wZfDcD(a6Fb`KjM`DTB#M zfv~`j#-tbD1rS>9xY0alz32S%yYKhk-+%e%4vzj-{=4m6DbD}gvp?S-?Y%v19?3)g z?(ANCIKF%Tx9VdnR2WV5gmbmJ-RrO(516Atwo8;2U*yry9dQG#%V&Zx+1LA&NMyE0 zw7hU>Gf%oP0%b*K;Yee)np*Y)LYZEkrtx}9>rgl7xl|59MP-5OTKE)f#X|wUvGAvU zUxG>$?ikpO8YG|g2Vmd&Tu0ySM*8DC!#NZp%l-+d(1=N(IJWS|y`chm0yfY)jU#U? z{FX?idr`QX==v^WJYY7Q@lN++Cu-GjLmVfB=EyifD5{|#cDsJ6=^;bnA%?iIgP)a} z0cYb8@#oRBV9NF14M*rmjc6Gnd!1XSC+yAZ;NP4jNN4? 
zN&BO@sYhlnQhuihe-nRGnni;zDN7^m&Wdnrc5P*6o%Y08W6Y|lNpYv$*Mq_2CxJ;( zl$dp9<+gqjJ3SGO5VIL?Ur|MYL(w za}^nrxY$CrmGahQ_cC^}1_4Ywei0thaY|`;%~9+mep%966bGlVUzsK`$38ETrk^_1 zZ*DC}_4kI_&x4aV&!}{@O!jY}w74Yr$Z~*T&ec{3%!{guh`(DAu_|0tg^Q|iQ57z# z!bMfMs0tTV;i4*BRE3MGa8VU5s=~z+6)sw@=M^rf0DI{-6~=?}$`eTfH|ZH|_c~g8 zl}_qTK(B{#)?t0VxLz@)w!#ucdM!_0mB^DznY{Kq%mxbPwY#p%Z4yfO0JrN&Oc25Q zYOtU0z z3mxpLTx&An3O#FT4bSITpWdY!s*+V<_<0M%PgAaahHC5+hc8w6uPXml<-eb({I`Nq z-4D+n7Sz&K8?slGs4CU?EmMuxf6PfWI?RE45Q*vRx~|^AvR_4s4KJd$_rN@v1(ygxc4Hfi7F#kcB(xEJ$6+zYkbKi-tJ+#{v@Ravj5l~>=o zxQgWaSaa5MJex_!3wrWmS0bE3QQ5_G@O3r)&DU9FzFOTkRVyH>fT$=Cy{Rt-qGKx6 zSg7!b;C6;kk=ww$@n(((7R95P+2&pqW|(GaXv(g#dguOEwrumoUHx7iK&%cB zs{_R90I@nitPT*X1H|e8u{uDk4iL+v8I8QlOZDK`0QI(grfLxS;qn=Wfy%-Q4K3t1 zQ6zE;`7NY(=5cw5fs56pB<3aRCFzCe6;xLtKE+jt6UFtL+1#yoHGHNRzHAkB!cFT3 zU5r{G^)HLm&78|+KQRqGd&IsCN}j=`N9LA;;`6rLw>r{99{U!@3_~yN|8Vi_fKt6i za@=a=T(s16_5$iU;jzYBF2-LTDrEsVL>8X? Date: Mon, 15 Jan 2024 21:37:40 +0400 Subject: [PATCH 34/34] migrate config, dump dagstore config --- indexprovider/wrapper.go | 6 ------ node/builder.go | 3 --- node/config/def.go | 5 ----- node/config/doc_gen.go | 6 ------ node/config/migrate.go | 3 ++- node/config/types.go | 1 - node/config/v5_to_v6.go | 28 ++++++++++++++++++++++++++++ 7 files changed, 30 insertions(+), 22 deletions(-) create mode 100644 node/config/v5_to_v6.go diff --git a/indexprovider/wrapper.go b/indexprovider/wrapper.go index 762b47af0..509660ba3 100644 --- a/indexprovider/wrapper.go +++ b/indexprovider/wrapper.go @@ -7,7 +7,6 @@ import ( "fmt" "net/url" "os" - "path/filepath" "github.com/filecoin-project/boost/lib/legacy" "github.com/filecoin-project/boost/storagemarket/types/legacytypes" @@ -45,7 +44,6 @@ import ( ) var log = logging.Logger("index-provider-wrapper") -var defaultDagStoreDir = "dagstore" type Wrapper struct { enabled bool @@ -76,10 +74,6 @@ func NewWrapper(cfg *config.Boost) func(lc fx.Lifecycle, h host.Host, r repo.Loc ssm *sectorstatemgr.SectorStateMgr, meshCreator idxprov.MeshCreator, storageService lotus_modules.MinerStorageService) (*Wrapper, error) { - if cfg.DAGStore.RootDir == "" { - cfg.DAGStore.RootDir = filepath.Join(r.Path(), defaultDagStoreDir) - } - _, isDisabled := prov.(*DisabledIndexProvider) // bitswap is enabled if there is a bitswap peer id diff --git a/node/builder.go b/node/builder.go index 6fa8b0d8e..4ae61cde6 100644 --- a/node/builder.go +++ b/node/builder.go @@ -430,9 +430,6 @@ func ConfigBoost(cfg *config.Boost) Option { if err != nil { return Error(fmt.Errorf("failed to parse cfg.Wallets.Miner: %s; err: %w", cfg.Wallets.Miner, err)) } - if len(cfg.DAGStore.RootDir) > 0 { - return Error(fmt.Errorf("Detected custom DAG store path %s. 
The DAG store must be at $BOOST_PATH/dagstore", cfg.DAGStore.RootDir)) - } if cfg.HttpDownload.NChunks < 1 || cfg.HttpDownload.NChunks > 16 { return Error(errors.New("HttpDownload.NChunks should be between 1 and 16")) diff --git a/node/config/def.go b/node/config/def.go index b30b14d8f..817e7411f 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -117,11 +117,6 @@ func DefaultBoost() *Boost { FundsTaggingEnabled: true, }, - DAGStore: lotus_config.DAGStoreConfig{ - MaxConcurrentIndex: 5, - MaxConcurrencyStorageCalls: 100, - GCInterval: lotus_config.Duration(1 * time.Minute), - }, IndexProvider: IndexProviderConfig{ Enable: true, EntriesCacheCapacity: 1024, diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index c7043d919..64d734e2c 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -141,12 +141,6 @@ a file containing the booster-bitswap peer id's private key. Can be left blank w Comment: ``, }, - { - Name: "DAGStore", - Type: "lotus_config.DAGStoreConfig", - - Comment: ``, - }, { Name: "IndexProvider", Type: "IndexProviderConfig", diff --git a/node/config/migrate.go b/node/config/migrate.go index 67d26b164..849cef3b5 100644 --- a/node/config/migrate.go +++ b/node/config/migrate.go @@ -15,7 +15,7 @@ var log = logging.Logger("cfg") // CurrentVersion is the config version expected by Boost. // We need to migrate the config file to this version. -const CurrentVersion = 5 +const CurrentVersion = 6 type migrateUpFn = func(cfgPath string) (string, error) @@ -25,6 +25,7 @@ var migrations = []migrateUpFn{ v2Tov3, // index 2 => version 3 v3Tov4, // index 3 => version 4 v4Tov5, // index 4 => version 5 + v5Tov6, // index 5 => version 6 } // This struct is used to get the config file version diff --git a/node/config/types.go b/node/config/types.go index d019b8bec..d6717ce39 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -50,7 +50,6 @@ type Boost struct { ContractDeals ContractDealsConfig HttpDownload HttpDownloadConfig Retrievals RetrievalConfig - DAGStore lotus_config.DAGStoreConfig IndexProvider IndexProviderConfig } diff --git a/node/config/v5_to_v6.go b/node/config/v5_to_v6.go new file mode 100644 index 000000000..31df72b5d --- /dev/null +++ b/node/config/v5_to_v6.go @@ -0,0 +1,28 @@ +package config + +import ( + "fmt" +) + +// Migrate from config version 5 to version 6 +func v5Tov6(cfgPath string) (string, error) { + cfg, err := FromFile(cfgPath, DefaultBoost()) + if err != nil { + return "", fmt.Errorf("parsing config file %s: %w", cfgPath, err) + } + + boostCfg, ok := cfg.(*Boost) + if !ok { + return "", fmt.Errorf("unexpected config type %T: expected *config.Boost", cfg) + } + + // Update the Boost config version + boostCfg.ConfigVersion = 6 + + bz, err := ConfigUpdate(boostCfg, DefaultBoost(), true, false) + if err != nil { + return "", fmt.Errorf("applying configuration: %w", err) + } + + return string(bz), nil +}
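
With the last two patches applied, the retrieval transports are addressed through the named sub-sections Retrievals.Graphsync, Retrievals.Bitswap and Retrievals.HTTP, the DAGStore section is gone, and the config version moves to 6. The sketch below is illustrative only and is not part of the series: an internal test for the config package, assuming nothing beyond the identifiers visible in the diffs above (DefaultBoost, Duration and the renamed RetrievalConfig fields); the test name and its placement in node/config are hypothetical.

package config

import (
	"testing"
	"time"
)

// A minimal sketch of how the renamed retrieval settings are reached after the
// v5 to v6 change; it only checks the defaults shown in def.go above.
func TestRetrievalDefaultsShape(t *testing.T) {
	cfg := DefaultBoost()

	// Each transport is now a named field, e.g. cfg.Retrievals.Graphsync.X
	// instead of the embedded cfg.Retrievals.GraphsyncRetrievalConfig.X.
	if cfg.Retrievals.Graphsync.RetrievalLogDuration != Duration(24*time.Hour) {
		t.Fatalf("unexpected default RetrievalLogDuration")
	}

	// Bitswap and HTTP retrievals stay disabled until a peer id or a
	// multiaddr is configured.
	if cfg.Retrievals.Bitswap.BitswapPeerID != "" {
		t.Fatalf("expected no default bitswap peer id")
	}
	if cfg.Retrievals.HTTP.HTTPRetrievalMultiaddr != "" {
		t.Fatalf("expected no default HTTP retrieval multiaddr")
	}
}

Naming the fields rather than embedding the structs is what gives each transport its own header in the generated documentation and TOML, which appears to be the point of the "add retrieval config headers" patch.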